// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Christoph Hellwig.
 */
#include "xfs.h"

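/*
 * Number of bio_vecs needed to map @count bytes, capped at the largest
 * single bio the block layer allows (BIO_MAX_PAGES entries).
 */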
static inline unsigned int bio_max_vecs(unsigned int count)
{
	return min_t(unsigned, howmany(count, PAGE_SIZE), BIO_MAX_PAGES);
}

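/*
 * Synchronously read from or write to @bdev: transfer @count bytes starting
 * at @sector to/from the kernel buffer at @data, which may be either a
 * kmalloc()ed or a vmalloc()ed allocation.  Returns 0 or a negative errno.
 */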
int
xfs_rw_bdev(
	struct block_device	*bdev,
	sector_t		sector,
	unsigned int		count,
	char			*data,
	unsigned int		op)
{
	unsigned int		is_vmalloc = is_vmalloc_addr(data);
	unsigned int		left = count;
	int			error;
	struct bio		*bio;

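	/*
	 * vmalloc mappings can alias in virtually indexed caches, so write
	 * dirty cachelines back to memory before the device reads the buffer.
	 */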
	if (is_vmalloc && op == REQ_OP_WRITE)
		flush_kernel_vmap_range(data, count);

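	/*
	 * Set up the first bio: sized to cover the whole range if it fits,
	 * and flagged as synchronous metadata I/O.
	 */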
	bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = op | REQ_META | REQ_SYNC;

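	/*
	 * Walk the buffer one page at a time; kmem_to_page() resolves both
	 * kmalloc and vmalloc addresses to the underlying page.
	 */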
	do {
		struct page	*page = kmem_to_page(data);
		unsigned int	off = offset_in_page(data);
		unsigned int	len = min_t(unsigned, left, PAGE_SIZE - off);

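		/*
		 * bio_add_page() returns the number of bytes it accepted.  A
		 * short return means the current bio is full, so allocate a
		 * fresh one for the rest of the range, chain it to the full
		 * bio, and send the full bio on its way.
		 */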
		while (bio_add_page(bio, page, len, off) != len) {
			struct bio	*prev = bio;

			bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio->bi_opf = prev->bi_opf;
			bio_chain(prev, bio);

			submit_bio(prev);
		}

		data += len;
		left -= len;
	} while (left > 0);

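	/*
	 * Submit the last (or only) bio and wait for it; bio_chain() makes
	 * the completion and any I/O error of every earlier bio propagate to
	 * this final one.
	 */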
	error = submit_bio_wait(bio);
	bio_put(bio);

	/*
	 * Note that the loop above advanced @data all the way to the end of
	 * the buffer, so rewind to the start before invalidating the range
	 * the device just wrote.
	 */
	if (is_vmalloc && op == REQ_OP_READ)
		invalidate_kernel_vmap_range(data - count, count);
	return error;
}
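
/*
 * Example caller (an illustrative sketch, not part of this file): reading a
 * log region into a vmalloc()ed buffer, roughly as log recovery would.  The
 * block count and starting sector here are hypothetical.
 *
 *	char	*buf = vmalloc(BBTOB(blocks));
 *	int	error;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	error = xfs_rw_bdev(bdev, sector, BBTOB(blocks), buf, REQ_OP_READ);
 *	vfree(buf);
 *	return error;
 */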