Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

virtio_pmem.c (blame: every line from commit ^8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

// SPDX-License-Identifier: GPL-2.0
/*
 * virtio_pmem.c: Virtio pmem Driver
 *
 * Discovers persistent memory range information
 * from host and provides a virtio based flushing
 * interface.
 */
#include "virtio_pmem.h"
#include "nd.h"

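For reference, the per-request bookkeeping used throughout this file is declared in virtio_pmem.h. A minimal sketch of that struct, reconstructed from the fields the code below touches (the authoritative layout is in the header):

/*
 * Sketch of struct virtio_pmem_request, inferred from its use in this
 * file; see virtio_pmem.h for the real definition.
 */
struct virtio_pmem_request {
        struct virtio_pmem_req req;     /* device-readable flush command */
        struct virtio_pmem_resp resp;   /* device-writable return status */
        wait_queue_head_t host_acked;   /* woken by virtio_pmem_host_ack() */
        bool done;                      /* set once the host completed the request */
        wait_queue_head_t wq_buf;       /* woken when a descriptor frees up */
        bool wq_buf_avail;              /* wait condition for wq_buf */
        struct list_head list;          /* node on vpmem->req_list while parked */
};
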
/* The interrupt handler */
void virtio_pmem_host_ack(struct virtqueue *vq)
{
        struct virtio_pmem *vpmem = vq->vdev->priv;
        struct virtio_pmem_request *req_data, *req_buf;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vpmem->pmem_lock, flags);
        while ((req_data = virtqueue_get_buf(vq, &len)) != NULL) {
                /* Complete the request and wake its submitter */
                req_data->done = true;
                wake_up(&req_data->host_acked);

                /*
                 * A descriptor just freed up; wake the oldest request
                 * parked while waiting for a virtqueue slot.
                 */
                if (!list_empty(&vpmem->req_list)) {
                        req_buf = list_first_entry(&vpmem->req_list,
                                        struct virtio_pmem_request, list);
                        req_buf->wq_buf_avail = true;
                        wake_up(&req_buf->wq_buf);
                        list_del(&req_buf->list);
                }
        }
        spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
}
EXPORT_SYMBOL_GPL(virtio_pmem_host_ack);

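This callback is wired to the device's single request virtqueue during probe. A minimal sketch of that setup, assuming it mirrors the upstream virtio_pmem probe code (which lives outside this file):

/* Sketch: virtqueue setup, assumed to follow the upstream probe path. */
static int init_vq(struct virtio_pmem *vpmem)
{
        /* Single request queue; completions arrive in virtio_pmem_host_ack() */
        vpmem->req_vq = virtio_find_single_vq(vpmem->vdev,
                                virtio_pmem_host_ack, "flush_queue");
        if (IS_ERR(vpmem->req_vq))
                return PTR_ERR(vpmem->req_vq);

        spin_lock_init(&vpmem->pmem_lock);
        INIT_LIST_HEAD(&vpmem->req_list);
        return 0;
}
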
/* The request submission function */
static int virtio_pmem_flush(struct nd_region *nd_region)
{
        struct virtio_device *vdev = nd_region->provider_data;
        struct virtio_pmem *vpmem  = vdev->priv;
        struct virtio_pmem_request *req_data;
        struct scatterlist *sgs[2], sg, ret;
        unsigned long flags;
        int err, err1;

        might_sleep();
        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                return -ENOMEM;

        req_data->done = false;
        init_waitqueue_head(&req_data->host_acked);
        init_waitqueue_head(&req_data->wq_buf);
        INIT_LIST_HEAD(&req_data->list);
        req_data->req.type = cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH);
        /* Device-readable buffer: the flush command */
        sg_init_one(&sg, &req_data->req, sizeof(req_data->req));
        sgs[0] = &sg;
        /* Device-writable buffer: the response (resp holds only ret) */
        sg_init_one(&ret, &req_data->resp.ret, sizeof(req_data->resp));
        sgs[1] = &ret;

        spin_lock_irqsave(&vpmem->pmem_lock, flags);
        /*
         * If virtqueue_add_sgs returns -ENOSPC, the req_vq virtqueue
         * does not have a free descriptor. Add the request to req_list
         * and wait for host_ack to wake us up when free slots become
         * available.
         */
        while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
                                        GFP_ATOMIC)) == -ENOSPC) {

                dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
                req_data->wq_buf_avail = false;
                list_add_tail(&req_data->list, &vpmem->req_list);
                spin_unlock_irqrestore(&vpmem->pmem_lock, flags);

                /* A host response results in "host_ack" getting called */
                wait_event(req_data->wq_buf, req_data->wq_buf_avail);
                spin_lock_irqsave(&vpmem->pmem_lock, flags);
        }
        /* virtqueue_kick() returns false if the virtqueue is broken */
        err1 = virtqueue_kick(vpmem->req_vq);
        spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
        /*
         * If virtqueue_add_sgs failed with an error other than -ENOSPC,
         * we can't do anything about that.
         */
        if (err || !err1) {
                dev_info(&vdev->dev, "failed to send command to virtio pmem device\n");
                err = -EIO;
        } else {
                /* A host response results in "host_ack" getting called */
                wait_event(req_data->host_acked, req_data->done);
                err = le32_to_cpu(req_data->resp.ret);
        }

        kfree(req_data);
        return err;
}

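The wire format exchanged above is minimal: per include/uapi/linux/virtio_pmem.h, both the request and the response are a single little-endian 32-bit word.

struct virtio_pmem_req {
        __le32 type;    /* VIRTIO_PMEM_REQ_TYPE_FLUSH (0) */
};

struct virtio_pmem_resp {
        __le32 ret;     /* 0 on success, nonzero device error otherwise */
};
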
/* The asynchronous flush callback function */
int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
{
        /*
         * Create a child bio for the asynchronous flush and chain it
         * to the parent bio. Otherwise, call the nd_region flush
         * directly. The child (marked by bi_sector == -1) is
         * resubmitted, comes back through here, and takes the
         * synchronous virtio_pmem_flush() path below.
         */
        if (bio && bio->bi_iter.bi_sector != -1) {
                struct bio *child = bio_alloc(GFP_ATOMIC, 0);

                if (!child)
                        return -ENOMEM;
                bio_copy_dev(child, bio);
                child->bi_opf = REQ_PREFLUSH;
                child->bi_iter.bi_sector = -1;
                bio_chain(child, bio);
                submit_bio(child);
                return 0;
        }
        if (virtio_pmem_flush(nd_region))
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_GPL(async_pmem_flush);
MODULE_LICENSE("GPL");
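
For completeness, virtio_pmem_flush() above reads the virtio_device back out of nd_region->provider_data, so the probe path has to store it there and register async_pmem_flush() as the region's flush callback. A sketch of that registration, assumed to mirror the upstream virtio_pmem probe (names such as res, vdev, and out_nd are illustrative):

/* Sketch of the region registration done in the probe path (not this file). */
struct nd_region_desc ndr_desc = {};

ndr_desc.res = &res;                    /* pmem range advertised by the host */
ndr_desc.provider_data = vdev;          /* read back by virtio_pmem_flush() */
ndr_desc.flush = async_pmem_flush;      /* nvdimm core invokes this on flush */
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);      /* flush completes asynchronously */
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region)
        goto out_nd;                    /* illustrative error label */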