// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
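
/*
 * Example module load (hypothetical values): both parameters are read-only
 * at runtime (S_IRUGO) and must be given at load time, e.g.
 *
 *	modprobe scm_block nr_requests=128 nr_requests_per_io=16
 *
 * nr_requests_per_io is capped at 64, see scm_blk_params_valid() below.
 */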

static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock_irq(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock_irq(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}
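
/*
 * Worked example for scm_aidaw_bytes() (a sketch, assuming the 16-byte
 * aidaw layout from asm/eadm.h and 4K pages): an aidaw pointer sitting
 * 256 bytes before the end of its page leaves room for 256 / 16 = 16
 * more entries, and since each aidaw addresses one 4K page of data,
 * 16 * 4K = 64K of I/O can still be described before a fresh aidaw page
 * must be taken from the mempool.
 */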

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	/* blk_rq_pos() counts 512-byte sectors, hence the shift by 9. */
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12; /* number of 4K blocks */
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}
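
/*
 * Worked example for scm_request_prepare() (hypothetical numbers): a 24K
 * read starting at sector 128 yields scm_addr = scmdev->address + 64K
 * (128 * 512), blk_count = 6 (24K / 4K) and six aidaw entries, one per
 * 4K page of payload.
 */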

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = BLK_STS_OK;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}
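
/*
 * Resulting layout of the aob page after scm_request_init() (a sketch,
 * assuming the aob and msb definitions from asm/eadm.h):
 *
 *	+-----------------------------------------------------------------+
 *	| aob header | msb[0] ... msb[nr_requests_per_io - 1] | aidaws ... |
 *	+-----------------------------------------------------------------+
 *
 * The slack between the last used msb and the end of the page serves as
 * the first aidaw list; scm_aidaw_fetch() falls back to the mempool once
 * it is exhausted.
 */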

static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_mq_requeue_request(scmrq->request[i], false);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	blk_mq_kick_requeue_list(bdev->rq);
}

static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	blk_status_t *error;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		error = blk_mq_rq_to_pdu(scmrq->request[i]);
		*error = scmrq->error;
		if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
			blk_mq_complete_request(scmrq->request[i]);
	}

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static void scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	atomic_inc(&bdev->queued_reqs);
	if (eadm_start_aob(scmrq->aob)) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
}

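/*
 * Per-hw-queue state: sq->scmrq collects up to nr_requests_per_io block
 * layer requests into a single aob before it is started; sq->lock
 * serializes access from concurrent queue_rq calls.
 */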
struct scm_queue {
	struct scm_request *scmrq;
	spinlock_t lock;
};

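/*
 * Queue a block layer request: requests are gathered in sq->scmrq until
 * either the queue signals the end of a batch (qd->last) or all msbs of
 * the aob are used, then the aob is handed to the eadm subchannel.
 * Resource shortages are reported as BLK_STS_RESOURCE so the block layer
 * retries later.
 */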
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct scm_device *scmdev = hctx->queue->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_queue *sq = hctx->driver_data;
	struct request *req = qd->rq;
	struct scm_request *scmrq;

	spin_lock(&sq->lock);
	if (!scm_permit_request(bdev, req)) {
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}

	scmrq = sq->scmrq;
	if (!scmrq) {
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			spin_unlock(&sq->lock);
			return BLK_STS_RESOURCE;
		}
		scm_request_init(bdev, scmrq);
		sq->scmrq = scmrq;
	}
	scm_request_set(scmrq, req);

	if (scm_request_prepare(scmrq)) {
		SCM_LOG(5, "aidaw alloc failed");
		scm_request_set(scmrq, NULL);

		if (scmrq->aob->request.msb_count)
			scm_request_start(scmrq);

		sq->scmrq = NULL;
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}
	blk_mq_start_request(req);

	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
		scm_request_start(scmrq);
		sq->scmrq = NULL;
	}
	spin_unlock(&sq->lock);
	return BLK_STS_OK;
}

static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int idx)
{
	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;

	spin_lock_init(&qd->lock);
	hctx->driver_data = qd;

	return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct scm_queue *qd = hctx->driver_data;

	WARN_ON(qd->scmrq);
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == BLK_STS_TIMEOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != BLK_STS_IOERR)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	scm_request_requeue(scmrq);
}

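/*
 * Interrupt handler, called by the eadm subchannel driver once an aob
 * completes. On error the request is retried up to scmrq->retries times
 * before the failure is propagated to the block layer.
 */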
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
	struct scm_request *scmrq = data;

	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}

	scm_request_finish(scmrq);
}

static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq = scm_blk_request,
	.complete = scm_blk_request_done,
	.init_hctx = scm_blk_init_hctx,
	.exit_hctx = scm_blk_exit_hctx,
};

int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	unsigned int devindex, nr_max_blk;
	struct request_queue *rq;
	int len, ret;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->lock);
	atomic_set(&bdev->queued_reqs, 0);

	bdev->tag_set.ops = &scm_mq_ops;
	bdev->tag_set.cmd_size = sizeof(blk_status_t);
	bdev->tag_set.nr_hw_queues = nr_requests;
	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	bdev->tag_set.numa_node = NUMA_NO_NODE;

	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
	if (ret)
		goto out;

	rq = blk_mq_init_queue(&bdev->tag_set);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_tag;
	}
	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 byte sectors per 4K block */
	blk_queue_max_segments(rq, nr_max_blk);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk) {
		ret = -ENOMEM;
		goto out_queue;
	}
	rq->queuedata = scmdev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);
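
	/*
	 * Name scheme examples: devindex 0 -> "scma", 25 -> "scmz",
	 * 26 -> "scmaa", 701 -> "scmzz"; 702 names in all, hence the
	 * devindex > 701 check above.
	 */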

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out_tag:
	blk_mq_free_tag_set(&bdev->tag_set);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	blk_mq_free_tag_set(&bdev->tag_set);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return true;
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);