/*
 * Copyright (C) 2015 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"

/*----------------------------------------------------------------*/

/*
 * Safe division functions that return zero on divide by zero.
 */
static unsigned safe_div(unsigned n, unsigned d)
{
	return d ? n / d : 0u;
}

static unsigned safe_mod(unsigned n, unsigned d)
{
	return d ? n % d : 0u;
}
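
/*
 * e.g. safe_div(7, 2) == 3 and safe_mod(7, 2) == 1, but a zero divisor
 * yields 0 rather than faulting: safe_div(7, 0) == 0.
 */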

/*----------------------------------------------------------------*/

struct entry {
	unsigned hash_next:28;
	unsigned prev:28;
	unsigned next:28;
	unsigned level:6;
	bool dirty:1;
	bool allocated:1;
	bool sentinel:1;
	bool pending_work:1;

	dm_oblock_t oblock;
};

/*----------------------------------------------------------------*/

#define INDEXER_NULL ((1u << 28u) - 1u)
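
/*
 * Entries refer to one another by 28-bit index rather than by pointer,
 * so the all-ones index (1 << 28) - 1 is reserved as the "NULL" link.
 * This caps an entry_space at (1 << 28) - 1 usable entries.
 */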

/*
 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.
 */
struct entry_space {
	struct entry *begin;
	struct entry *end;
};

static int space_init(struct entry_space *es, unsigned nr_entries)
{
	if (!nr_entries) {
		es->begin = es->end = NULL;
		return 0;
	}

	es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));
	if (!es->begin)
		return -ENOMEM;

	es->end = es->begin + nr_entries;
	return 0;
}

static void space_exit(struct entry_space *es)
{
	vfree(es->begin);
}

static struct entry *__get_entry(struct entry_space *es, unsigned block)
{
	struct entry *e;

	e = es->begin + block;
	BUG_ON(e >= es->end);

	return e;
}

static unsigned to_index(struct entry_space *es, struct entry *e)
{
	BUG_ON(e < es->begin || e >= es->end);
	return e - es->begin;
}

static struct entry *to_entry(struct entry_space *es, unsigned block)
{
	if (block == INDEXER_NULL)
		return NULL;

	return __get_entry(es, block);
}
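
/*
 * to_index() and to_entry() are inverses: to_entry(es, to_index(es, e))
 * == e for any entry in the space, and to_entry(es, INDEXER_NULL) is
 * NULL.
 */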

/*----------------------------------------------------------------*/

struct ilist {
	unsigned nr_elts;	/* excluding sentinel entries */
	unsigned head, tail;
};

static void l_init(struct ilist *l)
{
	l->nr_elts = 0;
	l->head = l->tail = INDEXER_NULL;
}

static struct entry *l_head(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->head);
}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->tail);
}

static struct entry *l_next(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->next);
}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->prev);
}

static bool l_empty(struct ilist *l)
{
	return l->head == INDEXER_NULL;
}

static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *head = l_head(es, l);

	e->next = l->head;
	e->prev = INDEXER_NULL;

	if (head)
		head->prev = l->head = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *tail = l_tail(es, l);

	e->next = INDEXER_NULL;
	e->prev = l->tail;

	if (tail)
		tail->next = l->tail = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_before(struct entry_space *es, struct ilist *l,
			 struct entry *old, struct entry *e)
{
	struct entry *prev = l_prev(es, old);

	if (!prev)
		l_add_head(es, l, e);

	else {
		e->prev = old->prev;
		e->next = to_index(es, old);
		prev->next = old->prev = to_index(es, e);

		if (!e->sentinel)
			l->nr_elts++;
	}
}

static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *prev = l_prev(es, e);
	struct entry *next = l_next(es, e);

	if (prev)
		prev->next = e->next;
	else
		l->head = e->next;

	if (next)
		next->prev = e->prev;
	else
		l->tail = e->prev;

	if (!e->sentinel)
		l->nr_elts--;
}

static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_head(es, l); e; e = l_next(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_tail(es, l); e; e = l_prev(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

/*----------------------------------------------------------------*/

/*
 * The stochastic-multi-queue is a set of LRU lists stacked into levels.
 * Entries are moved up a level when they are used, which loosely orders
 * the most accessed entries in the top levels and the least accessed in
 * the bottom levels. This structure is *much* better than a single LRU
 * list.
 */
#define MAX_LEVELS 64u

struct queue {
	struct entry_space *es;

	unsigned nr_elts;
	unsigned nr_levels;
	struct ilist qs[MAX_LEVELS];

	/*
	 * We maintain a count of the number of entries we would like in each
	 * level.
	 */
	unsigned last_target_nr_elts;
	unsigned nr_top_levels;
	unsigned nr_in_top_levels;
	unsigned target_count[MAX_LEVELS];
};

static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
{
	unsigned i;

	q->es = es;
	q->nr_elts = 0;
	q->nr_levels = nr_levels;

	for (i = 0; i < q->nr_levels; i++) {
		l_init(q->qs + i);
		q->target_count[i] = 0u;
	}

	q->last_target_nr_elts = 0u;
	q->nr_top_levels = 0u;
	q->nr_in_top_levels = 0u;
}

static unsigned q_size(struct queue *q)
{
	return q->nr_elts;
}

/*
 * Insert an entry at the back of its level.
 */
static void q_push(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_tail(q->es, q->qs + e->level, e);
}

static void q_push_front(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_head(q->es, q->qs + e->level, e);
}

static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_before(q->es, q->qs + e->level, old, e);
}

static void q_del(struct queue *q, struct entry *e)
{
	l_del(q->es, q->qs + e->level, e);
	if (!e->sentinel)
		q->nr_elts--;
}

/*
 * Return the oldest entry of the lowest populated level.
 */
static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
{
	unsigned level;
	struct entry *e;

	max_level = min(max_level, q->nr_levels);

	for (level = 0; level < max_level; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
			if (e->sentinel) {
				if (can_cross_sentinel)
					continue;
				else
					break;
			}

			return e;
		}

	return NULL;
}

static struct entry *q_pop(struct queue *q)
{
	struct entry *e = q_peek(q, q->nr_levels, true);

	if (e)
		q_del(q, e);

	return e;
}

/*
 * This function assumes there is a non-sentinel entry to pop. It's only
 * used by redistribute, so we know this is true. It also doesn't adjust
 * the q->nr_elts count.
 */
static struct entry *__redist_pop_from(struct queue *q, unsigned level)
{
	struct entry *e;

	for (; level < q->nr_levels; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
			if (!e->sentinel) {
				l_del(q->es, q->qs + e->level, e);
				return e;
			}

	return NULL;
}

static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
{
	unsigned level, nr_levels, entries_per_level, remainder;

	BUG_ON(lbegin > lend);
	BUG_ON(lend > q->nr_levels);
	nr_levels = lend - lbegin;
	entries_per_level = safe_div(nr_elts, nr_levels);
	remainder = safe_mod(nr_elts, nr_levels);

	for (level = lbegin; level < lend; level++)
		q->target_count[level] =
			(level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
}
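
/*
 * e.g. spreading nr_elts = 10 over levels [0, 3) gives
 * entries_per_level = 3 and remainder = 1, so the targets become
 * 4, 3, 3; the remainder is soaked up by the lowest levels.
 */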

/*
 * Typically we have fewer entries in the top few levels, which allows
 * the promote threshold to be adjusted with finer granularity.
 */
static void q_set_targets(struct queue *q)
{
	if (q->last_target_nr_elts == q->nr_elts)
		return;

	q->last_target_nr_elts = q->nr_elts;

	if (q->nr_top_levels > q->nr_levels)
		q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);

	else {
		q_set_targets_subrange_(q, q->nr_in_top_levels,
					q->nr_levels - q->nr_top_levels, q->nr_levels);

		if (q->nr_in_top_levels < q->nr_elts)
			q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
						0, q->nr_levels - q->nr_top_levels);
		else
			q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
	}
}

static void q_redistribute(struct queue *q)
{
	unsigned target, level;
	struct ilist *l, *l_above;
	struct entry *e;

	q_set_targets(q);

	for (level = 0u; level < q->nr_levels - 1u; level++) {
		l = q->qs + level;
		target = q->target_count[level];

		/*
		 * Pull down some entries from the level above.
		 */
		while (l->nr_elts < target) {
			e = __redist_pop_from(q, level + 1u);
			if (!e) {
				/* bug in nr_elts */
				break;
			}

			e->level = level;
			l_add_tail(q->es, l, e);
		}

		/*
		 * Push some entries up.
		 */
		l_above = q->qs + level + 1u;
		while (l->nr_elts > target) {
			e = l_pop_tail(q->es, l);

			if (!e)
				/* bug in nr_elts */
				break;

			e->level = level + 1u;
			l_add_tail(q->es, l_above, e);
		}
	}
}

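/*
 * Promote an entry by extra_levels (capped at the top level). To keep
 * the level populations stable we try to swap the promoted entry with
 * the first non-sentinel entry (de) of the destination level, demoting
 * de to the old level. If the sentinels s1 and s2 are supplied, de is
 * reinserted before s1, before s2, or at the back, depending on how
 * many sentinels preceded it in the destination level, so it roughly
 * keeps its position relative to the period sentinels.
 */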
static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
		      struct entry *s1, struct entry *s2)
{
	struct entry *de;
	unsigned sentinels_passed = 0;
	unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);

	/* try and find an entry to swap with */
	if (extra_levels && (e->level < q->nr_levels - 1u)) {
		for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
			sentinels_passed++;

		if (de) {
			q_del(q, de);
			de->level = e->level;
			if (s1) {
				switch (sentinels_passed) {
				case 0:
					q_push_before(q, s1, de);
					break;

				case 1:
					q_push_before(q, s2, de);
					break;

				default:
					q_push(q, de);
				}
			} else
				q_push(q, de);
		}
	}

	q_del(q, e);
	e->level = new_level;
	q_push(q, e);
}

/*----------------------------------------------------------------*/

#define FP_SHIFT 8
#define SIXTEENTH (1u << (FP_SHIFT - 4u))
#define EIGHTH (1u << (FP_SHIFT - 3u))
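
/*
 * 8-bit fixed point: with FP_SHIFT == 8, 1.0 is represented as 256,
 * so SIXTEENTH == 16 and EIGHTH == 32.
 */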

struct stats {
	unsigned hit_threshold;
	unsigned hits;
	unsigned misses;
};

enum performance {
	Q_POOR,
	Q_FAIR,
	Q_WELL
};

static void stats_init(struct stats *s, unsigned nr_levels)
{
	s->hit_threshold = (nr_levels * 3u) / 4u;
	s->hits = 0u;
	s->misses = 0u;
}

static void stats_reset(struct stats *s)
{
	s->hits = s->misses = 0u;
}

static void stats_level_accessed(struct stats *s, unsigned level)
{
	if (level >= s->hit_threshold)
		s->hits++;
	else
		s->misses++;
}

static void stats_miss(struct stats *s)
{
	s->misses++;
}

/*
 * There are times when we don't have any confidence in the hotspot
 * queue, such as when a fresh cache is created and the blocks have been
 * spread out across the levels, or when the I/O load changes. We detect
 * this by seeing how often a lookup is in the top levels of the hotspot
 * queue.
 */
static enum performance stats_assess(struct stats *s)
{
	unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);

	if (confidence < SIXTEENTH)
		return Q_POOR;

	else if (confidence < EIGHTH)
		return Q_FAIR;

	else
		return Q_WELL;
}
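
/*
 * e.g. 10 hits and 90 misses gives confidence (10 << 8) / 100 == 25,
 * which is between SIXTEENTH (16) and EIGHTH (32), so Q_FAIR. In other
 * words Q_POOR means under 1/16th of lookups land in the top levels.
 */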

/*----------------------------------------------------------------*/

struct smq_hash_table {
	struct entry_space *es;
	unsigned long long hash_bits;
	unsigned *buckets;
};

/*
 * All cache entries are stored in a chained hash table. To save space we
 * use indexing again, and only store indexes to the next entry.
 */
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
{
	unsigned i, nr_buckets;

	ht->es = es;
	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
	ht->hash_bits = __ffs(nr_buckets);

	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
	if (!ht->buckets)
		return -ENOMEM;

	for (i = 0; i < nr_buckets; i++)
		ht->buckets[i] = INDEXER_NULL;

	return 0;
}
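
/*
 * The table is sized at roughly one bucket per four entries, with a
 * floor of 16 buckets; e.g. nr_entries == 1024 gives nr_buckets == 256
 * and hash_bits == 8.
 */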

static void h_exit(struct smq_hash_table *ht)
{
	vfree(ht->buckets);
}

static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
{
	return to_entry(ht->es, ht->buckets[bucket]);
}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{
	return to_entry(ht->es, e->hash_next);
}

static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
{
	e->hash_next = ht->buckets[bucket];
	ht->buckets[bucket] = to_index(ht->es, e);
}

static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);

	__h_insert(ht, h, e);
}

static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
				struct entry **prev)
{
	struct entry *e;

	*prev = NULL;
	for (e = h_head(ht, h); e; e = h_next(ht, e)) {
		if (e->oblock == oblock)
			return e;

		*prev = e;
	}

	return NULL;
}

static void __h_unlink(struct smq_hash_table *ht, unsigned h,
		       struct entry *e, struct entry *prev)
{
	if (prev)
		prev->hash_next = e->hash_next;
	else
		ht->buckets[h] = e->hash_next;
}

/*
 * Looks up an entry, also moving it to the front of its bucket.
 */
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
	struct entry *e, *prev;
	unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);

	e = __h_lookup(ht, h, oblock, &prev);
	if (e && prev) {
		/*
		 * Move to the front because this entry is likely
		 * to be hit again.
		 */
		__h_unlink(ht, h, e, prev);
		__h_insert(ht, h, e);
	}

	return e;
}

static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
	struct entry *prev;

	/*
	 * The downside of using a singly linked list is that we have to
	 * iterate the bucket to remove an item.
	 */
	e = __h_lookup(ht, h, e->oblock, &prev);
	if (e)
		__h_unlink(ht, h, e, prev);
}

/*----------------------------------------------------------------*/

struct entry_alloc {
	struct entry_space *es;
	unsigned begin;

	unsigned nr_allocated;
	struct ilist free;
};

static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
			   unsigned begin, unsigned end)
{
	unsigned i;

	ea->es = es;
	ea->nr_allocated = 0u;
	ea->begin = begin;

	l_init(&ea->free);
	for (i = begin; i != end; i++)
		l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
}

static void init_entry(struct entry *e)
{
	/*
	 * We can't memset because that would clear the hotspot and
	 * sentinel bits which remain constant.
	 */
	e->hash_next = INDEXER_NULL;
	e->next = INDEXER_NULL;
	e->prev = INDEXER_NULL;
	e->level = 0u;
	e->dirty = true;	/* FIXME: audit */
	e->allocated = true;
	e->sentinel = false;
	e->pending_work = false;
}

static struct entry *alloc_entry(struct entry_alloc *ea)
{
	struct entry *e;

	if (l_empty(&ea->free))
		return NULL;

	e = l_pop_head(ea->es, &ea->free);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
{
	struct entry *e = __get_entry(ea->es, ea->begin + i);

	BUG_ON(e->allocated);

	l_del(ea->es, &ea->free, e);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

static void free_entry(struct entry_alloc *ea, struct entry *e)
{
	BUG_ON(!ea->nr_allocated);
	BUG_ON(!e->allocated);

	ea->nr_allocated--;
	e->allocated = false;
	l_add_tail(ea->es, &ea->free, e);
}

static bool allocator_empty(struct entry_alloc *ea)
{
	return l_empty(&ea->free);
}

static unsigned get_index(struct entry_alloc *ea, struct entry *e)
{
	return to_index(ea->es, e) - ea->begin;
}

static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
{
	return __get_entry(ea->es, ea->begin + index);
}
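
/*
 * Indices here are relative to ea->begin, so get_index() and
 * get_entry() are inverses: get_index(ea, get_entry(ea, i)) == i.
 */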

/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS 64u
#define NR_CACHE_LEVELS 64u

#define WRITEBACK_PERIOD (10ul * HZ)
#define DEMOTE_PERIOD (60ul * HZ)

#define HOTSPOT_UPDATE_PERIOD (HZ)
#define CACHE_UPDATE_PERIOD (60ul * HZ)
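
/*
 * The periods are expressed in jiffies (HZ per second): the writeback
 * sentinels rotate every 10s and the demote sentinels every 60s, while
 * the hotspot parameters are re-examined every second and the cache's
 * every 60s.
 */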

struct smq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	spinlock_t lock;
	dm_cblock_t cache_size;
	sector_t cache_block_size;

	sector_t hotspot_block_size;
	unsigned nr_hotspot_blocks;
	unsigned cache_blocks_per_hotspot_block;
	unsigned hotspot_level_jump;

	struct entry_space es;
	struct entry_alloc writeback_sentinel_alloc;
	struct entry_alloc demote_sentinel_alloc;
	struct entry_alloc hotspot_alloc;
	struct entry_alloc cache_alloc;

	unsigned long *hotspot_hit_bits;
	unsigned long *cache_hit_bits;

	/*
	 * We maintain three queues of entries: the cache proper, consisting
	 * of a clean queue and a dirty queue containing the currently active
	 * mappings, and a hotspot queue, which uses a larger block size to
	 * track blocks that are hit frequently and are potential candidates
	 * for promotion to the cache.
	 */
	struct queue hotspot;
	struct queue clean;
	struct queue dirty;

	struct stats hotspot_stats;
	struct stats cache_stats;

	/*
	 * Keeps track of time, incremented by the core. We use this to
	 * avoid attributing multiple hits within the same tick.
	 */
	unsigned tick;

	/*
	 * The hash tables allow us to quickly find an entry by origin
	 * block.
	 */
	struct smq_hash_table table;
	struct smq_hash_table hotspot_table;

	bool current_writeback_sentinels;
	unsigned long next_writeback_period;

	bool current_demote_sentinels;
	unsigned long next_demote_period;

	unsigned write_promote_level;
	unsigned read_promote_level;

	unsigned long next_hotspot_period;
	unsigned long next_cache_period;

	struct background_tracker *bg_work;

	bool migrations_allowed;
};

/*----------------------------------------------------------------*/

static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
{
	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}
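
/*
 * Each sentinel allocator holds two banks of NR_CACHE_LEVELS sentinels,
 * one sentinel per level; 'which' selects the bank. update_sentinels()
 * below flips the current bank once per period and re-pushes its
 * sentinels, the idea being that an entry found behind the previous
 * bank's sentinel has sat in its level for at least a full period.
 */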
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static void __update_writeback_sentinels(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) unsigned level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct queue *q = &mq->dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct entry *sentinel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) for (level = 0; level < q->nr_levels; level++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) sentinel = writeback_sentinel(mq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) q_del(q, sentinel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) q_push(q, sentinel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static void __update_demote_sentinels(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) unsigned level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct queue *q = &mq->clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct entry *sentinel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) for (level = 0; level < q->nr_levels; level++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sentinel = demote_sentinel(mq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) q_del(q, sentinel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) q_push(q, sentinel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static void update_sentinels(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (time_after(jiffies, mq->next_writeback_period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) __update_writeback_sentinels(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (time_after(jiffies, mq->next_demote_period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) mq->next_demote_period = jiffies + DEMOTE_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) mq->current_demote_sentinels = !mq->current_demote_sentinels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) __update_demote_sentinels(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) static void __sentinels_init(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct entry *sentinel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) for (level = 0; level < NR_CACHE_LEVELS; level++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) sentinel = writeback_sentinel(mq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) sentinel->level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) q_push(&mq->dirty, sentinel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) sentinel = demote_sentinel(mq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) sentinel->level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) q_push(&mq->clean, sentinel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static void sentinels_init(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) mq->next_demote_period = jiffies + DEMOTE_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) mq->current_writeback_sentinels = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) mq->current_demote_sentinels = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) __sentinels_init(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
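	/*
	 * Flip to the other bank and initialise it too, so both banks of
	 * sentinels start out on the queues.
	 */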
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) mq->current_demote_sentinels = !mq->current_demote_sentinels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) __sentinels_init(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static void del_queue(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) q_del(e->dirty ? &mq->dirty : &mq->clean, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) static void push_queue(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (e->dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) q_push(&mq->dirty, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) q_push(&mq->clean, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
/*
 * State annotations: h = in the hash table, q = on a queue,
 * a = allocated.  So the line below says push() takes an entry that is
 * allocated but in neither structure, and leaves it in both.
 */
// !h, !q, a -> h, q, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static void push(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) h_insert(&mq->table, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (!e->pending_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) push_queue(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void push_queue_front(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (e->dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) q_push_front(&mq->dirty, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) q_push_front(&mq->clean, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static void push_front(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) h_insert(&mq->table, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (!e->pending_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) push_queue_front(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
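/*
 * The cblock is implicit: it is just the entry's index within the
 * cache allocator, so the mapping never needs to be stored explicitly.
 */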
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return to_cblock(get_index(&mq->cache_alloc, e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static void requeue(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * Pending work has temporarily been taken out of the queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (e->pending_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
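	/*
	 * The hit bit throttles requeueing: each cblock moves up at most
	 * once per cache period (the bits are cleared in
	 * end_cache_period()).
	 */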
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!e->dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) q_requeue(&mq->clean, e, 1u, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) q_requeue(&mq->dirty, e, 1u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static unsigned default_promote_level(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * The promote level depends on the current performance of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * If the cache is performing badly, then we can't afford
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * to promote much without causing performance to drop below that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * of the origin device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * If the cache is performing well, then we don't need to promote
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * much. If it isn't broken, don't fix it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * If the cache is middling then we promote more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * This scheme reminds me of a graph of entropy vs probability of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * binary variable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
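	/*
	 * Worked example: hits = 600, misses = 400 gives index =
	 * (600 << 4) / 1000 = 9, and a promote level of table[9] == 6.
	 * Both extremes (index 0 and index 16) promote the least.
	 */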
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) unsigned hits = mq->cache_stats.hits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) unsigned misses = mq->cache_stats.misses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unsigned index = safe_div(hits << 4u, hits + misses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return table[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void update_promote_levels(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * If there are unused cache entries then we want to be really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * eager to promote.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
	/*
	 * If the hotspot queue is performing badly then we have little
	 * confidence that we know which blocks to promote.  So we cut down
	 * the number of promotions.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) switch (stats_assess(&mq->hotspot_stats)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case Q_POOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) threshold_level /= 4u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) case Q_FAIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) threshold_level /= 2u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) case Q_WELL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
	mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
	mq->write_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
/*
 * If the hotspot queue is performing badly, then we try to move
 * entries around more quickly.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static void update_level_jump(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) switch (stats_assess(&mq->hotspot_stats)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) case Q_POOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) mq->hotspot_level_jump = 4u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) case Q_FAIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) mq->hotspot_level_jump = 2u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) case Q_WELL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) mq->hotspot_level_jump = 1u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static void end_hotspot_period(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) update_promote_levels(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (time_after(jiffies, mq->next_hotspot_period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) update_level_jump(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) q_redistribute(&mq->hotspot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) stats_reset(&mq->hotspot_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static void end_cache_period(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (time_after(jiffies, mq->next_cache_period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) q_redistribute(&mq->dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) q_redistribute(&mq->clean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) stats_reset(&mq->cache_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * Targets are given as a percentage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) #define CLEAN_TARGET 25u
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) #define FREE_TARGET 25u
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
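/*
 * e.g. with a 10000-block cache, percent_to_target(mq, FREE_TARGET)
 * is 2500 blocks.
 */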
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return from_cblock(mq->cache_size) * p / 100u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static bool clean_target_met(struct smq_policy *mq, bool idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
	/*
	 * Cache entries may not be populated, so we cannot rely on the
	 * size of the clean queue.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (idle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * We'd like to clean everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return q_size(&mq->dirty) == 0u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * If we're busy we don't worry about cleaning at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
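/*
 * Demotions that are already queued will free their cblocks shortly,
 * so they count towards the free target; this stops us queueing more
 * demotions than are needed.
 */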
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static bool free_target_met(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) unsigned nr_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) percent_to_target(mq, FREE_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static void mark_pending(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) BUG_ON(e->sentinel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) BUG_ON(!e->allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) BUG_ON(e->pending_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) e->pending_work = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static void clear_pending(struct smq_policy *mq, struct entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) BUG_ON(!e->pending_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) e->pending_work = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
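/*
 * queue_writeback() and queue_demotion() follow the same pattern:
 * take the entry off its queue and mark it pending before handing the
 * work to the background tracker; if the tracker cannot queue the
 * work, roll both steps back.
 */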
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static void queue_writeback(struct smq_policy *mq, bool idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct policy_work work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) mark_pending(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) q_del(&mq->dirty, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) work.op = POLICY_WRITEBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) work.oblock = e->oblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) work.cblock = infer_cblock(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) r = btracker_queue(mq->bg_work, &work, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) clear_pending(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) q_push_front(&mq->dirty, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static void queue_demotion(struct smq_policy *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct policy_work work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (WARN_ON_ONCE(!mq->migrations_allowed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (!clean_target_met(mq, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) queue_writeback(mq, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) mark_pending(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) q_del(&mq->clean, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) work.op = POLICY_DEMOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) work.oblock = e->oblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) work.cblock = infer_cblock(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) r = btracker_queue(mq->bg_work, &work, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) clear_pending(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) q_push_front(&mq->clean, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct policy_work **workp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct policy_work work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (!mq->migrations_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (allocator_empty(&mq->cache_alloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * We always claim to be 'idle' to ensure some demotions happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * with continuous loads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (!free_target_met(mq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) queue_demotion(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (btracker_promotion_already_present(mq->bg_work, oblock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * We allocate the entry now to reserve the cblock. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * background work is aborted we must remember to free it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) e = alloc_entry(&mq->cache_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) BUG_ON(!e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) e->pending_work = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) work.op = POLICY_PROMOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) work.oblock = oblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) work.cblock = infer_cblock(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) r = btracker_queue(mq->bg_work, &work, workp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) free_entry(&mq->cache_alloc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) enum promote_result {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) PROMOTE_NOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) PROMOTE_TEMPORARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) PROMOTE_PERMANENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * Converts a boolean into a promote result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static enum promote_result maybe_promote(bool promote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int data_dir, bool fast_promote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (data_dir == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (!allocator_empty(&mq->cache_alloc) && fast_promote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return PROMOTE_TEMPORARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return maybe_promote(hs_e->level >= mq->write_promote_level);
	} else {
		return maybe_promote(hs_e->level >= mq->read_promote_level);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
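/*
 * Origin blocks are grouped into larger hotspot blocks.  e.g. with
 * cache_blocks_per_hotspot_block == 16, origin block 35 falls in
 * hotspot block 2 (35 / 16).
 */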
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) sector_t r = from_oblock(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) (void) sector_div(r, mq->cache_blocks_per_hotspot_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return to_oblock(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) unsigned hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dm_oblock_t hb = to_hblock(mq, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct entry *e = h_lookup(&mq->hotspot_table, hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) stats_level_accessed(&mq->hotspot_stats, e->level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) hi = get_index(&mq->hotspot_alloc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) q_requeue(&mq->hotspot, e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) test_and_set_bit(hi, mq->hotspot_hit_bits) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 0u : mq->hotspot_level_jump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) stats_miss(&mq->hotspot_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) e = alloc_entry(&mq->hotspot_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (!e) {
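			/*
			 * No free hotspot entries: evict the coldest
			 * entry (bottom of the hotspot queue) and reuse
			 * it for this block.
			 */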
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) e = q_pop(&mq->hotspot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) h_remove(&mq->hotspot_table, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) hi = get_index(&mq->hotspot_alloc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) clear_bit(hi, mq->hotspot_hit_bits);
			}
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) e->oblock = hb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) q_push(&mq->hotspot, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) h_insert(&mq->hotspot_table, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * Public interface, via the policy struct. See dm-cache-policy.h for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * description of these.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return container_of(p, struct smq_policy, policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static void smq_destroy(struct dm_cache_policy *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) btracker_destroy(mq->bg_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) h_exit(&mq->hotspot_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) h_exit(&mq->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) free_bitset(mq->hotspot_hit_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) free_bitset(mq->cache_hit_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) space_exit(&mq->es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) kfree(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int data_dir, bool fast_copy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct policy_work **work, bool *background_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct entry *e, *hs_e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) enum promote_result pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) *background_work = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) e = h_lookup(&mq->table, oblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) stats_level_accessed(&mq->cache_stats, e->level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) requeue(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) *cblock = infer_cblock(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) stats_miss(&mq->cache_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
		/*
		 * The hotspot queue only gets updated with misses; hits
		 * are accounted for by requeue() above.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) hs_e = update_hotspot_queue(mq, oblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) pr = should_promote(mq, hs_e, data_dir, fast_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (pr != PROMOTE_NOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) queue_promotion(mq, oblock, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) *background_work = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) int data_dir, bool fast_copy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) bool *background_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) r = __lookup(mq, oblock, cblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) data_dir, fast_copy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) NULL, background_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static int smq_lookup_with_work(struct dm_cache_policy *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dm_oblock_t oblock, dm_cblock_t *cblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) int data_dir, bool fast_copy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct policy_work **work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) bool background_queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
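/*
 * If the tracker has no work queued we may still owe the core some
 * cleaning, so try to queue a writeback and then ask again.
 */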
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct policy_work **result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) r = btracker_issue(mq->bg_work, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (r == -ENODATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (!clean_target_met(mq, idle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) queue_writeback(mq, idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) r = btracker_issue(mq->bg_work, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
/*
 * We need to clear any pending work flags that have been set, and in
 * the case of an aborted promotion free the entry that was reserved
 * for the destination cblock.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static void __complete_background_work(struct smq_policy *mq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct policy_work *work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) bool success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct entry *e = get_entry(&mq->cache_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) from_cblock(work->cblock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) switch (work->op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) case POLICY_PROMOTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) // !h, !q, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) clear_pending(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) e->oblock = work->oblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) e->level = NR_CACHE_LEVELS - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) push(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) // h, q, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) free_entry(&mq->cache_alloc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) // !h, !q, !a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) case POLICY_DEMOTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) // h, !q, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) h_remove(&mq->table, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) free_entry(&mq->cache_alloc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) // !h, !q, !a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) clear_pending(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) push_queue(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) // h, q, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) case POLICY_WRITEBACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) // h, !q, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) clear_pending(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) push_queue(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) // h, q, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) btracker_complete(mq->bg_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static void smq_complete_background_work(struct dm_cache_policy *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) struct policy_work *work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) bool success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) __complete_background_work(mq, work, success);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) // in_hash(oblock) -> in_hash(oblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (e->pending_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) e->dirty = set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) del_queue(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) e->dirty = set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) push_queue(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) __smq_set_clear_dirty(mq, cblock, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) __smq_set_clear_dirty(mq, cblock, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
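/*
 * Mappings loaded without a valid hint are scattered pseudo-randomly
 * across the levels, rather than all landing in one level.
 */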
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static unsigned random_level(dm_cblock_t cblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static int smq_load_mapping(struct dm_cache_policy *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) dm_oblock_t oblock, dm_cblock_t cblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) bool dirty, uint32_t hint, bool hint_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) e->oblock = oblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) e->dirty = dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) e->pending_work = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * When we load mappings we push ahead of both sentinels in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * allow demotions and cleaning to occur immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) push_front(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (!e->allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) // FIXME: what if this block has pending background work?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) del_queue(mq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) h_remove(&mq->table, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) free_entry(&mq->cache_alloc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
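/*
 * The hint we persist is just the entry's level; smq_load_mapping()
 * above uses it to restore the entry to roughly the same position.
 */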
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (!e->allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return e->level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static dm_cblock_t smq_residency(struct dm_cache_policy *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) dm_cblock_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) r = to_cblock(mq->cache_alloc.nr_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static void smq_tick(struct dm_cache_policy *p, bool can_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) struct smq_policy *mq = to_smq_policy(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) spin_lock_irqsave(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) mq->tick++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) update_sentinels(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) end_hotspot_period(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) end_cache_period(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) spin_unlock_irqrestore(&mq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
	struct smq_policy *mq = to_smq_policy(p);

	mq->migrations_allowed = allow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * smq has no config values, but the old mq policy did. To avoid breaking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * software we continue to accept these configurables for the mq policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * but they have no effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static int mq_set_config_value(struct dm_cache_policy *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) const char *key, const char *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) unsigned long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (kstrtoul(value, 10, &tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (!strcasecmp(key, "random_threshold") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) !strcasecmp(key, "sequential_threshold") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) !strcasecmp(key, "discard_promote_adjustment") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) !strcasecmp(key, "read_promote_adjustment") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) !strcasecmp(key, "write_promote_adjustment")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) unsigned maxlen, ssize_t *sz_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) ssize_t sz = *sz_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
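	/* The leading "10" tells userspace how many words follow. */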
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) DMEMIT("10 random_threshold 0 "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) "sequential_threshold 0 "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) "discard_promote_adjustment 0 "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) "read_promote_adjustment 0 "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) "write_promote_adjustment 0 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) *sz_ptr = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /* Init the policy plugin interface function pointers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) mq->policy.destroy = smq_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) mq->policy.lookup = smq_lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) mq->policy.lookup_with_work = smq_lookup_with_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) mq->policy.get_background_work = smq_get_background_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) mq->policy.complete_background_work = smq_complete_background_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) mq->policy.set_dirty = smq_set_dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) mq->policy.clear_dirty = smq_clear_dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) mq->policy.load_mapping = smq_load_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) mq->policy.invalidate_mapping = smq_invalidate_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) mq->policy.get_hint = smq_get_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) mq->policy.residency = smq_residency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) mq->policy.tick = smq_tick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) mq->policy.allow_migrations = smq_allow_migrations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (mimic_mq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) mq->policy.set_config_value = mq_set_config_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) mq->policy.emit_config_values = mq_emit_config_values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
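/*
 * When not mimicking mq, set_config_value and emit_config_values are left
 * NULL (the struct came from kzalloc()), and callers are expected to check
 * before dispatching.  A minimal sketch of the assumed dispatch; the real
 * wrappers live in dm-cache-policy-internal.h:
 */
#if 0	/* illustrative sketch only, not built */
static int policy_set_config_value_sketch(struct dm_cache_policy *p,
					  const char *key, const char *value)
{
	/* No hook registered means the policy has no tunables. */
	return p->set_config_value ? p->set_config_value(p, key, value) : -EINVAL;
}
#endif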
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static bool too_many_hotspot_blocks(sector_t origin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) sector_t hotspot_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) unsigned nr_hotspot_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static void calc_hotspot_params(sector_t origin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) sector_t cache_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) unsigned nr_cache_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) sector_t *hotspot_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) unsigned *nr_hotspot_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) *hotspot_block_size = cache_block_size * 16u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) while ((*hotspot_block_size > cache_block_size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) *hotspot_block_size /= 2u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
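/*
 * Worked example (illustrative numbers): with cache_block_size = 128
 * sectors (64 KiB) and nr_cache_blocks = 16384 (a 1 GiB cache), we start
 * with hotspot_block_size = 2048 sectors (1 MiB) and nr_hotspot_blocks =
 * max(16384 / 4, 1024) = 4096, covering 4 GiB.  If origin_size is only
 * 2 GiB, one halving brings the coverage down to exactly 2 GiB and the
 * loop stops with hotspot_block_size = 1024 sectors.
 */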
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) sector_t origin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) sector_t cache_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) bool mimic_mq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) bool migrations_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) unsigned total_sentinels = 2u * nr_sentinels_per_queue;
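	/*
	 * Reading of the arithmetic above: each of the writeback and
	 * demote queues keeps two alternating sets of per-level
	 * sentinels (hence 2u * NR_CACHE_LEVELS per queue), and there
	 * are two such queues (hence 2u * nr_sentinels_per_queue in
	 * total).  See sentinels_init() and the allocator split below.
	 */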
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (!mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) init_policy_functions(mq, mimic_mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) mq->cache_size = cache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) mq->cache_block_size = cache_block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) &mq->hotspot_block_size, &mq->nr_hotspot_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) mq->hotspot_level_jump = 1u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) DMERR("couldn't initialize entry space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) goto bad_pool_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) for (i = 0; i < nr_sentinels_per_queue; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) for (i = 0; i < nr_sentinels_per_queue; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) total_sentinels + mq->nr_hotspot_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) init_allocator(&mq->cache_alloc, &mq->es,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) total_sentinels + mq->nr_hotspot_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
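	/*
	 * Entry space layout, as implied by the init_allocator() ranges
	 * above:
	 *
	 *   [0, nr_sentinels_per_queue)                writeback sentinels
	 *   [nr_sentinels_per_queue, total_sentinels)  demote sentinels
	 *   [total_sentinels, +nr_hotspot_blocks)      hotspot entries
	 *   [..., +from_cblock(cache_size))            cache entries
	 */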
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (!mq->hotspot_hit_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) DMERR("couldn't allocate hotspot hit bitset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) goto bad_hotspot_hit_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
	if (from_cblock(cache_size)) {
		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
		if (!mq->cache_hit_bits) {
			DMERR("couldn't allocate cache hit bitset");
			goto bad_cache_hit_bits;
		}
		clear_bitset(mq->cache_hit_bits, from_cblock(cache_size));
	} else {
		mq->cache_hit_bits = NULL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) mq->tick = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) spin_lock_init(&mq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) mq->hotspot.nr_top_levels = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) stats_init(&mq->cache_stats, NR_CACHE_LEVELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) goto bad_alloc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) goto bad_alloc_hotspot_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) sentinels_init(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) mq->next_hotspot_period = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) mq->next_cache_period = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (!mq->bg_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) goto bad_btracker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) mq->migrations_allowed = migrations_allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return &mq->policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) bad_btracker:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) h_exit(&mq->hotspot_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) bad_alloc_hotspot_table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) h_exit(&mq->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) bad_alloc_table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) free_bitset(mq->cache_hit_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) bad_cache_hit_bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) free_bitset(mq->hotspot_hit_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) bad_hotspot_hit_bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) space_exit(&mq->es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) bad_pool_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) kfree(mq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) sector_t origin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) sector_t cache_block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return __smq_create(cache_size, origin_size, cache_block_size, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) sector_t origin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) sector_t cache_block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return __smq_create(cache_size, origin_size, cache_block_size, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) sector_t origin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) sector_t cache_block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return __smq_create(cache_size, origin_size, cache_block_size, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
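/*
 * Summary of the three variants built on __smq_create():
 *
 *   policy     mimic_mq  migrations_allowed
 *   smq        false     true
 *   mq         true      true
 *   cleaner    false     false
 */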
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) static struct dm_cache_policy_type smq_policy_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) .name = "smq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) .version = {2, 0, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) .hint_size = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) .owner = THIS_MODULE,
	.create = smq_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static struct dm_cache_policy_type mq_policy_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) .name = "mq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) .version = {2, 0, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) .hint_size = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) .create = mq_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) static struct dm_cache_policy_type cleaner_policy_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) .name = "cleaner",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) .version = {2, 0, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) .hint_size = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) .create = cleaner_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static struct dm_cache_policy_type default_policy_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) .name = "default",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) .version = {2, 0, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) .hint_size = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) .create = smq_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) .real = &smq_policy_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) };
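/*
 * "default" is just an alias for smq: it shares smq_create(), and its
 * .real field points at smq_policy_type, which (our reading of the
 * field, see dm-cache-policy.h) lets the policy core resolve the alias
 * to the policy actually in use.
 */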
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) static int __init smq_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) r = dm_cache_policy_register(&smq_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) DMERR("register failed %d", r);
		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) r = dm_cache_policy_register(&mq_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) DMERR("register failed (as mq) %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) goto out_mq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) r = dm_cache_policy_register(&cleaner_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) DMERR("register failed (as cleaner) %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) goto out_cleaner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) r = dm_cache_policy_register(&default_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) DMERR("register failed (as default) %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) goto out_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
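	/*
	 * Error unwind: the labels below deliberately fall through into
	 * one another, unregistering in the reverse order of
	 * registration.
	 */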
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) out_default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) dm_cache_policy_unregister(&cleaner_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) out_cleaner:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) dm_cache_policy_unregister(&mq_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) out_mq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) dm_cache_policy_unregister(&smq_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static void __exit smq_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) dm_cache_policy_unregister(&cleaner_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) dm_cache_policy_unregister(&smq_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) dm_cache_policy_unregister(&mq_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) dm_cache_policy_unregister(&default_policy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) module_init(smq_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) module_exit(smq_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) MODULE_DESCRIPTION("smq cache policy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) MODULE_ALIAS("dm-cache-default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) MODULE_ALIAS("dm-cache-mq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) MODULE_ALIAS("dm-cache-cleaner");