// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);

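/* Free the page sets backing the write buffer data area. */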
static void pblk_rb_data_free(struct pblk_rb *rb)
{
	struct pblk_rb_pages *p, *t;

	down_write(&pblk_rb_lock);
	list_for_each_entry_safe(p, t, &rb->pages, list) {
		free_pages((unsigned long)page_address(p->pages), p->order);
		list_del(&p->list);
		kfree(p);
	}
	up_write(&pblk_rb_lock);
}

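/* Tear down the write buffer: free the data pages and the entry array. */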
void pblk_rb_free(struct pblk_rb *rb)
{
	pblk_rb_data_free(rb);
	vfree(rb->entries);
}

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
static unsigned int pblk_rb_calculate_size(unsigned int nr_entries,
					   unsigned int threshold)
{
	unsigned int thr_sz = 1 << (get_count_order(threshold + NVM_MAX_VLBA));
	unsigned int max_sz = max(thr_sz, nr_entries);
	unsigned int max_io;

	/* Alloc a write buffer that can (i) fit at least two split bios
	 * (considering the max I/O size NVM_MAX_VLBA), and (ii) guarantee
	 * that the threshold will be respected.
	 */
	max_io = (1 << max((int)(get_count_order(max_sz)),
				(int)(get_count_order(NVM_MAX_VLBA << 1))));
	if ((threshold + NVM_MAX_VLBA) >= max_io)
		max_io <<= 1;

	return max_io;
}

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/core-api/circular-buffers.rst)
 */
int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
		 unsigned int seg_size)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entries;
	unsigned int init_entry = 0;
	unsigned int max_order = MAX_ORDER - 1;
	unsigned int power_size, power_seg_sz;
	unsigned int alloc_order, order, iter;
	unsigned int nr_entries;

	nr_entries = pblk_rb_calculate_size(size, threshold);
	entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(seg_size);

	down_write(&pblk_rb_lock);
	rb->entries = entries;
	rb->seg_size = (1 << power_seg_sz);
	rb->nr_entries = (1 << power_size);
	rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
	rb->back_thres = threshold;
	rb->flush_point = EMPTY_ENTRY;

	spin_lock_init(&rb->w_lock);
	spin_lock_init(&rb->s_lock);

	INIT_LIST_HEAD(&rb->pages);

	alloc_order = power_size;
	if (alloc_order >= max_order) {
		order = max_order;
		iter = (1 << (alloc_order - max_order));
	} else {
		order = alloc_order;
		iter = 1;
	}

	do {
		struct pblk_rb_entry *entry;
		struct pblk_rb_pages *page_set;
		void *kaddr;
		unsigned long set_size;
		int i;

		page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
		if (!page_set) {
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}

		page_set->order = order;
		page_set->pages = alloc_pages(GFP_KERNEL, order);
		if (!page_set->pages) {
			kfree(page_set);
			pblk_rb_data_free(rb);
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}
		kaddr = page_address(page_set->pages);

		entry = &rb->entries[init_entry];
		entry->data = kaddr;
		entry->cacheline = pblk_cacheline_to_addr(init_entry++);
		entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

		set_size = (1 << order);
		for (i = 1; i < set_size; i++) {
			entry = &rb->entries[init_entry];
			entry->cacheline = pblk_cacheline_to_addr(init_entry++);
			entry->data = kaddr + (i * rb->seg_size);
			entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
			bio_list_init(&entry->w_ctx.bios);
		}

		list_add_tail(&page_set->list, &rb->pages);
		iter--;
	} while (iter > 0);
	up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_set(&rb->inflight_flush_point, 0);
#endif

	/*
	 * Initialize rate-limiter, which controls access to the write buffer
	 * by user and GC I/O
	 */
	pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);

	return 0;
}

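/* Reset a write context so its buffer entry can be reused by a new write. */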
static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
	int flags;

	flags = READ_ONCE(w_ctx->flags);
	WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
			"pblk: overwriting unsubmitted data\n");

	/* Release flags on context. Protect from writes and reads */
	smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
	pblk_ppa_set_empty(&w_ctx->ppa);
	w_ctx->lba = ADDR_EMPTY;
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
					(CIRC_SPACE(head, tail, size))

/*
 * Buffer space is calculated with respect to the sync pointer, which signals
 * the entries that have already been synchronized to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}

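/*
 * Advance position @p by @nr_entries, wrapping around the end of the buffer.
 * Relies on rb->nr_entries being a power of two.
 */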
unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
			      unsigned int nr_entries)
{
	return (p + nr_entries) & (rb->nr_entries - 1);
}

/*
 * Buffer count is calculated with respect to the submission pointer, which
 * signals the entries that are still waiting to be sent to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int subm = READ_ONCE(rb->subm);

	return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

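/* Count the entries written to the buffer that have not yet been synced. */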
unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}

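/*
 * Advance the submission pointer by @nr_entries and return its previous value,
 * i.e. the position of the first entry just handed over for submission.
 */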
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int subm;

	subm = READ_ONCE(rb->subm);
	/* Commit read means updating submission pointer */
	smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));

	return subm;
}

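/*
 * Move the l2p_update pointer forward @to_update entries: point the L2P table
 * at the device address for each entry, drop the line reference taken for the
 * cached sector and return the consumed user/GC budget to the rate-limiter.
 */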
static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_line *line;
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	unsigned int user_io = 0, gc_io = 0;
	unsigned int i;
	int flags;

	for (i = 0; i < to_update; i++) {
		entry = &rb->entries[rb->l2p_update];
		w_ctx = &entry->w_ctx;

		flags = READ_ONCE(entry->w_ctx.flags);
		if (flags & PBLK_IOTYPE_USER)
			user_io++;
		else if (flags & PBLK_IOTYPE_GC)
			gc_io++;
		else
			WARN(1, "pblk: unknown IO type\n");

		pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
							entry->cacheline);

		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
		clean_wctx(w_ctx);
		rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
	}

	pblk_rl_out(&pblk->rl, user_io, gc_io);

	return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
			      unsigned int mem, unsigned int sync)
{
	unsigned int space, count;
	int ret = 0;

	lockdep_assert_held(&rb->w_lock);

	/* Update l2p only as buffer entries are being overwritten */
	space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
	if (space > nr_entries)
		goto out;

	count = nr_entries - space;
	/* l2p_update used exclusively under rb->w_lock */
	ret = __pblk_rb_update_l2p(rb, count);

out:
	return ret;
}

/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
	unsigned int sync;
	unsigned int to_update;

	spin_lock(&rb->w_lock);

	/* Protect from reads and writes */
	sync = smp_load_acquire(&rb->sync);

	to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
	__pblk_rb_update_l2p(rb, to_update);

	spin_unlock(&rb->w_lock);
}

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
				  struct pblk_w_ctx w_ctx,
				  struct pblk_rb_entry *entry)
{
	memcpy(entry->data, data, rb->seg_size);

	entry->w_ctx.lba = w_ctx.lba;
	entry->w_ctx.ppa = w_ctx.ppa;
}

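/*
 * Copy a user write into the entry at @ring_pos and make the L2P table point
 * to its cacheline. The caller must have reserved the position beforehand.
 */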
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}

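/*
 * Same as pblk_rb_write_entry_user(), but for data being rewritten by garbage
 * collection. If pblk_update_map_gc() rejects the update (the lba was
 * overwritten while GC was moving it), the entry's lba is set to ADDR_EMPTY so
 * the stale data is no longer associated with that lba.
 */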
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *line,
			    u64 paddr, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
		entry->w_ctx.lba = ADDR_EMPTY;

	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}

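/*
 * Set the flush point to the entry right before @pos and, if @bio is given,
 * queue it on that entry so it can be completed once the entry hits the media.
 * Returns 1 if a bio was queued, 0 otherwise (including when the buffer is
 * already synced up to @pos).
 */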
static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
				   unsigned int pos)
{
	struct pblk_rb_entry *entry;
	unsigned int sync, flush_point;

	pblk_rb_sync_init(rb, NULL);
	sync = READ_ONCE(rb->sync);

	if (pos == sync) {
		pblk_rb_sync_end(rb, NULL);
		return 0;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_inc(&rb->inflight_flush_point);
#endif

	flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
	entry = &rb->entries[flush_point];

	/* Protect flush points */
	smp_store_release(&rb->flush_point, flush_point);

	if (bio)
		bio_list_add(&entry->w_ctx.bios, bio);

	pblk_rb_sync_end(rb, NULL);

	return bio ? 1 : 0;
}

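/*
 * Check that there is room for @nr_entries new entries while keeping
 * rb->back_thres entries free, updating the L2P table for any buffer entries
 * that are about to be overwritten. On success, *pos is set to the current
 * write position; the mem pointer itself is not advanced here.
 */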
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			       unsigned int *pos)
{
	unsigned int mem;
	unsigned int sync;
	unsigned int threshold;

	sync = READ_ONCE(rb->sync);
	mem = READ_ONCE(rb->mem);

	threshold = nr_entries + rb->back_thres;

	if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
		return 0;

	if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
		return 0;

	*pos = mem;

	return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			     unsigned int *pos)
{
	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	/* Protect from read count */
	smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
	return 1;
}

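/*
 * Force a flush of the write buffer: set a flush point at the current write
 * position and kick the write thread so buffered data is persisted to the
 * media.
 */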
void pblk_rb_flush(struct pblk_rb *rb)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	unsigned int mem = READ_ONCE(rb->mem);

	if (pblk_rb_flush_point_set(rb, NULL, mem))
		return;

	pblk_write_kick(pblk);
}

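/*
 * Reserve @nr_entries in the write buffer and advance the mem pointer. If the
 * bio carries REQ_PREFLUSH, a flush point is set for it; *io_ret then tells
 * the caller whether the bio completes immediately (NVM_IO_DONE) or must wait
 * for the flush point to be synced (NVM_IO_OK).
 */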
static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
				   unsigned int *pos, struct bio *bio,
				   int *io_ret)
{
	unsigned int mem;

	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
	*io_ret = NVM_IO_DONE;

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct pblk *pblk = container_of(rb, struct pblk, rwb);

		atomic64_inc(&pblk->nr_flush);
		if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
			*io_ret = NVM_IO_OK;
	}

	/* Protect from read count */
	smp_store_release(&rb->mem, mem);

	return 1;
}

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	int io_ret;

	spin_lock(&rb->w_lock);
	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
	if (io_ret) {
		spin_unlock(&rb->w_lock);
		return io_ret;
	}

	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
		spin_unlock(&rb->w_lock);
		return NVM_IO_REQUEUE;
	}

	pblk_rl_user_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return io_ret;
}

/*
 * See the comment on pblk_rb_may_write_user().
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);

	spin_lock(&rb->w_lock);
	if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	if (!pblk_rb_may_write(rb, nr_entries, pos)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	pblk_rl_gc_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return 1;
}

/*
 * Read available entries on rb and add them to the given bio. To avoid a
 * memory copy, page references to the write buffer are added to the bio
 * instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 unsigned int pos, unsigned int nr_entries,
				 unsigned int count)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct request_queue *q = pblk->dev->q;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;
	struct pblk_rb_entry *entry;
	struct page *page;
	unsigned int pad = 0, to_read = nr_entries;
	unsigned int i;
	int flags;

	if (count < nr_entries) {
		pad = nr_entries - count;
		to_read = count;
	}

	/* Add space for packed metadata if in use */
	pad += (pblk->min_write_pgs - pblk->min_write_pgs_data);

	c_ctx->sentry = pos;
	c_ctx->nr_valid = to_read;
	c_ctx->nr_padded = pad;

	for (i = 0; i < to_read; i++) {
		entry = &rb->entries[pos];

		/* A write has been allowed into the buffer, but data is still
		 * being copied to it. It is ok to busy wait.
		 */
try:
		flags = READ_ONCE(entry->w_ctx.flags);
		if (!(flags & PBLK_WRITTEN_DATA)) {
			io_schedule();
			goto try;
		}

		page = virt_to_page(entry->data);
		if (!page) {
			pblk_err(pblk, "could not allocate write bio page\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
								rb->seg_size) {
			pblk_err(pblk, "could not add page to write bio\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		flags &= ~PBLK_WRITTEN_DATA;
		flags |= PBLK_SUBMITTED_ENTRY;

		/* Release flags on context. Protect from writes */
		smp_store_release(&entry->w_ctx.flags, flags);

		pos = pblk_rb_ptr_wrap(rb, pos, 1);
	}

	if (pad) {
		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
			pblk_err(pblk, "could not pad page in write bio\n");
			return NVM_IO_ERR;
		}

		if (pad < pblk->min_write_pgs)
			atomic64_inc(&pblk->pad_dist[pad - 1]);
		else
			pblk_warn(pblk, "padding more than min. sectors\n");

		atomic64_add(pad, &pblk->pad_wa);
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(pad, &pblk->padded_writes);
#endif

	return NVM_IO_OK;
}

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr l2p_ppa;
	u64 pos = pblk_addr_to_cacheline(ppa);
	void *data;
	int flags;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must ensure that the access will not cause an overflow */
	BUG_ON(pos >= rb->nr_entries);
#endif
	entry = &rb->entries[pos];
	w_ctx = &entry->w_ctx;
	flags = READ_ONCE(w_ctx->flags);

	spin_lock(&rb->w_lock);
	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Check if the entry has been overwritten or is scheduled to be */
	if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
	    flags & PBLK_WRITABLE_ENTRY) {
		ret = 0;
		goto out;
	}
	data = bio_data(bio);
	memcpy(data, entry->data, rb->seg_size);

out:
	spin_unlock(&rb->w_lock);
	return ret;
}

struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
	unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);

	return &rb->entries[entry].w_ctx;
}

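/*
 * Take the sync lock and return the current sync position. When @flags is
 * NULL, the lock is taken with spin_lock_irq() instead of saving the irq
 * flags.
 */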
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
	__acquires(&rb->s_lock)
{
	if (flags)
		spin_lock_irqsave(&rb->s_lock, *flags);
	else
		spin_lock_irq(&rb->s_lock);

	return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
	__releases(&rb->s_lock)
{
	lockdep_assert_held(&rb->s_lock);

	if (flags)
		spin_unlock_irqrestore(&rb->s_lock, *flags);
	else
		spin_unlock_irq(&rb->s_lock);
}

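/*
 * Advance the sync pointer by @nr_entries once data has reached the media,
 * clearing the flush point if this sync covers it. Called with the sync lock
 * held (see pblk_rb_sync_init()).
 */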
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int sync, flush_point;

	lockdep_assert_held(&rb->s_lock);

	sync = READ_ONCE(rb->sync);
	flush_point = READ_ONCE(rb->flush_point);

	if (flush_point != EMPTY_ENTRY) {
		unsigned int secs_to_flush;

		secs_to_flush = pblk_rb_ring_count(flush_point, sync,
					rb->nr_entries);
		if (secs_to_flush < nr_entries) {
			/* Protect flush points */
			smp_store_release(&rb->flush_point, EMPTY_ENTRY);
		}
	}

	sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);

	/* Protect from counts */
	smp_store_release(&rb->sync, sync);

	return sync;
}

/* Calculate how many sectors to submit up to the current flush point. */
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
	unsigned int subm, sync, flush_point;
	unsigned int submitted, to_flush;

	/* Protect flush points */
	flush_point = smp_load_acquire(&rb->flush_point);
	if (flush_point == EMPTY_ENTRY)
		return 0;

	/* Protect syncs */
	sync = smp_load_acquire(&rb->sync);

	subm = READ_ONCE(rb->subm);
	submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

	/* The sync point itself counts as a sector to sync */
	to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;

	return (submitted < to_flush) ? (to_flush - submitted) : 0;
}

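/*
 * Sanity check used when tearing down the write buffer: returns 1 if the
 * entry array or any entry's data buffer is missing, 0 otherwise. An idle
 * buffer (all pointers equal, no flush point) passes without checking every
 * entry.
 */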
int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
	struct pblk_rb_entry *entry;
	int i;
	int ret = 0;

	spin_lock(&rb->w_lock);
	spin_lock_irq(&rb->s_lock);

	if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
				(rb->sync == rb->l2p_update) &&
				(rb->flush_point == EMPTY_ENTRY)) {
		goto out;
	}

	if (!rb->entries) {
		ret = 1;
		goto out;
	}

	for (i = 0; i < rb->nr_entries; i++) {
		entry = &rb->entries[i];

		if (!entry->data) {
			ret = 1;
			goto out;
		}
	}

out:
	spin_unlock_irq(&rb->s_lock);
	spin_unlock(&rb->w_lock);

	return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
	return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
	return (pos >= rb->nr_entries);
}

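/*
 * Dump the write buffer state (pointers, flush point, counters and queued
 * completions) into @buf for sysfs.
 */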
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_c_ctx *c;
	ssize_t offset;
	int queued_entries = 0;

	spin_lock_irq(&rb->s_lock);
	list_for_each_entry(c, &pblk->compl_list, list)
		queued_entries++;
	spin_unlock_irq(&rb->s_lock);

	if (rb->flush_point != EMPTY_ENTRY)
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			rb->flush_point,
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);
	else
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);

	return offset;
}