// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

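/*
 * State kept for a copy (bounce) mapping so it can be undone in
 * bio_uncopy_user(): whether the pages were allocated here (and thus must
 * be freed), whether this is a null mapping that must not be copied back,
 * and a deep copy of the caller's iov_iter used for the copy-back.
 */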
struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination, taken by value so that the caller's
 *	iterator is not advanced by the copy
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio, *bounce_bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (nr_pages > BIO_MAX_PAGES)
		nr_pages = BIO_MAX_PAGES;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;
	bio->bi_opf |= req_op(rq);

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto cleanup;

	/*
	 * We link the bounce buffer in and could have to traverse it later, so
	 * we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	struct bio *bio, *bounce_bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	bio->bi_opf |= req_op(rq);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	/*
	 * Subtle: if we end up needing to bounce a bio, it would normally
	 * disappear when its bi_end_io is run. However, we need the original
	 * bio for the unmap, so grab an extra reference to it
	 */
	bio_get(bio);

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto out_put_orig;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;

out_put_orig:
	bio_put(bio);
out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ret;
}

/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
static void bio_unmap_user(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
	bio_put(bio);
}

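/*
 * Reads into a vmalloc()ed buffer go through the pages' linear-map alias,
 * so on architectures with aliasing data caches the vmap range has to be
 * invalidated before the caller looks at the data.
 */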
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(*bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
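
/*
 * Example (sketch): appending a caller-built bio to a passthrough request.
 * blk_queue_bounce() may substitute a bounce bio, so @bio is passed by
 * reference and must be re-read afterwards. build_payload_bio() is a
 * hypothetical helper, not a kernel API.
 *
 *	struct bio *bio = build_payload_bio(...);
 *	int err = blk_rq_append_bio(rq, &bio);
 *	if (err)
 *		bio_put(bio);	// bio was not linked into the request
 */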

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
 * before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
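
/*
 * Example (sketch, close to what the SG_IO ioctl path does): importing a
 * user iovec and mapping it onto a passthrough request. @uvec and @nr_segs
 * are assumed to come from user space and @rq from blk_get_request();
 * error unwinding is abbreviated.
 *
 *	struct iovec *iov = NULL;
 *	struct iov_iter iter;
 *	ssize_t bytes;
 *
 *	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &iter);
 *	if (bytes < 0)
 *		return bytes;
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 */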

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
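
/*
 * Example (sketch): the usual map/execute/unmap round trip for a single
 * user buffer, roughly as the SCSI generic code does it. Request setup
 * and error unwinding are abbreviated.
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put_request;
 *	bio = rq->bio;			// save for blk_rq_unmap_user()
 *	blk_execute_rq(q, NULL, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */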

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		if (bio->bi_private) {
			ret2 = bio_uncopy_user(mapped_bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_unmap_user(mapped_bio);
		}

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data, in bytes
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
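
/*
 * Example (sketch): issuing a driver-internal command with a kmalloc()ed
 * payload. An unaligned or on-stack buffer would transparently take the
 * bio_copy_kern() bounce path instead of being mapped directly.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(q, NULL, rq, 0);
 */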