// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

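/*
 * The I, B and K arguments above are statement expressions, run once per
 * segment with "v" bound to a struct iovec, bio_vec or kvec respectively.
 * For the iovec case the expression must evaluate to the number of bytes
 * it failed to process (0 on full success).  A minimal hypothetical user
 * that merely tallies bytes could look like:
 *
 *	size_t seen = 0;
 *	iterate_and_advance(i, n, v,
 *		({ seen += v.iov_len; 0; }),	// user-space segment
 *		seen += v.bv_len,		// page (bvec) segment
 *		seen += v.iov_len		// kernel (kvec) segment
 *	)
 */
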
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

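/*
 * Both helpers follow the copy_{to,from}_user() convention: the return
 * value is the number of bytes that could *not* be transferred, so 0
 * means complete success.  Hypothetical caller sketch:
 *
 *	size_t left = copyout(udst, ksrc, len);
 *	size_t copied = len - left;	// bytes actually reaching userspace
 */
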
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	buf->flags = 0;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (e.g.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (iter_is_iovec(i)) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
			0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

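/*
 * Typical use, after the pattern in generic_perform_write(): pre-fault
 * the user pages so that a subsequent copy done with page faults
 * disabled is unlikely to come up short.  Sketch:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 */
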
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

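/*
 * Illustrative sketch (ubuf, len and kbuf are hypothetical): wrapping a
 * single user buffer in an iov_iter that acts as the destination of a
 * read-style copy.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = _copy_to_iter(kbuf, len, &iter);
 */
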
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

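/*
 * Make room for up to "size" bytes at the head of the pipe: first top up
 * the partially-filled page there (if any), then append freshly
 * allocated zero-offset pages until the request is covered, the ring is
 * full, or alloc_page() fails.  Returns how many bytes of space were
 * actually secured and reports the starting slot and offset through
 * *iter_headp / *offp.
 */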
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->flags = 0;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

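/*
 * csum_block_add() folds "next" into "sum" as if the new chunk started
 * at byte offset "off" of the overall stream; for odd offsets the
 * ones'-complement sum is byte-rotated first.  That is what lets the
 * code below checksum a buffer chunk by chunk.  Hypothetical sketch over
 * a split source buffer:
 *
 *	sum = csum_and_memcpy(dst, src, 3, 0, 0);		// bytes 0..2
 *	sum = csum_and_memcpy(dst + 3, src + 3, 5, sum, 3);	// bytes 3..7
 */
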
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct csum_state *csstate,
					 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = csstate->csum;
	size_t off = csstate->off;
	unsigned int i_head;
	size_t n, r;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

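/*
 * Sketch of the usual call (iter, kbuf and len hypothetical): the return
 * value is how many bytes were actually copied, and the iterator has
 * been advanced by that amount; a short return normally means a faulting
 * user address (ITER_IOVEC) or a pipe that could not be grown.
 *
 *	size_t copied = _copy_to_iter(kbuf, len, iter);
 *	if (copied != len)
 *		// handle the short copy; already-copied bytes stay consumed
 */
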
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
				     const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = copy_mc_to_kernel(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				   struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
				      off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes successfully copied.
 *
 * The main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
			   v.iov_len),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
			   v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

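/*
 * Unlike _copy_from_iter(), the _full variant is all-or-nothing: it
 * bails out with false on the first failed segment and, because it uses
 * iterate_all_kinds() rather than iterate_and_advance(), it only
 * advances the iterator (via iov_iter_advance()) once everything has
 * been copied.  Hypothetical caller:
 *
 *	if (!_copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 */
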
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * _copy_from_iter_flushcache - write destination through cpu cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * @addr: destination kernel address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * @bytes: total transfer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * @i: source iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * The pmem driver arranges for filesystem-dax to use this facility via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * dax_copy_from_iter() to ensure that writes to persistent memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * are flushed through the CPU cache. It is differentiated from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * _copy_from_iter_nocache() in that it guarantees all data is flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * for all iterator types. _copy_from_iter_nocache() only attempts to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * bypass the cache for the ITER_IOVEC case, and on some archs may use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * instructions that strand dirty data in the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) char *to = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (unlikely(iov_iter_is_pipe(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) iterate_and_advance(i, bytes, v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) v.iov_base, v.iov_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) v.bv_offset, v.bv_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) v.iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) #endif
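/*
 * Usage sketch (illustrative): a pmem-style ->copy_from_iter()
 * implementation funnels writes through the flushcache variant so the
 * data is durable when it returns; 'pmem_addr' (a mapped persistent
 * buffer) is an assumption of this sketch:
 *
 *	size_t copied = _copy_from_iter_flushcache(pmem_addr, bytes, i);
 */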
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) char *to = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (unlikely(iov_iter_is_pipe(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (unlikely(i->count < bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) iterate_all_kinds(i, bytes, v, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) v.iov_base, v.iov_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 0;}),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) v.bv_offset, v.bv_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) iov_iter_advance(i, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) EXPORT_SYMBOL(_copy_from_iter_full_nocache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct page *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) size_t v = n + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * The general case needs to access the compound page order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * compute the page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * However, we mostly deal with order-0 pages and thus can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * avoid a possible cache line miss: a request that fits within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * the first PAGE_SIZE bytes is sane for any page order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (n <= v && v <= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) head = compound_head(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) v += (page - head) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (likely(n <= v && v <= (page_size(head))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (unlikely(!page_copy_sane(page, offset, bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (i->type & (ITER_BVEC|ITER_KVEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) void *kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return wanted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) } else if (unlikely(iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (unlikely(i->count < bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) bytes = i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) i->count -= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) } else if (likely(!iov_iter_is_pipe(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return copy_page_to_iter_iovec(page, offset, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return copy_page_to_iter_pipe(page, offset, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) EXPORT_SYMBOL(copy_page_to_iter);
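/*
 * Usage sketch (illustrative): a read-side actor copying from a
 * page-cache page into whatever the iterator describes; a short return
 * means a fault or an exhausted iterator, not a bug:
 *
 *	size_t copied = copy_page_to_iter(page, offset, chunk, iter);
 *
 *	if (copied < chunk)
 *		... stop and report the short read ...
 */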
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (unlikely(!page_copy_sane(page, offset, bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (i->type & (ITER_BVEC|ITER_KVEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) void *kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return wanted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return copy_page_from_iter_iovec(page, offset, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) EXPORT_SYMBOL(copy_page_from_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static size_t pipe_zero(size_t bytes, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct pipe_inode_info *pipe = i->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) unsigned int p_mask = pipe->ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) unsigned int i_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) size_t n, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!sanity(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) bytes = n = push_pipe(i, bytes, &i_head, &off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (unlikely(!n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) i->head = i_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) i->iov_offset = off + chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) n -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) i_head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) } while (n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) i->count -= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (unlikely(iov_iter_is_pipe(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return pipe_zero(bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) iterate_and_advance(i, bytes, v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) clear_user(v.iov_base, v.iov_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) memzero_page(v.bv_page, v.bv_offset, v.bv_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) memset(v.iov_base, 0, v.iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) EXPORT_SYMBOL(iov_iter_zero);
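/*
 * Usage sketch (illustrative): read paths use this to fill holes, e.g.
 * when a sparse-file read crosses an unmapped extent:
 *
 *	size_t cleared = iov_iter_zero(hole_len, iter);
 *
 *	if (cleared < hole_len)
 *		... a user segment faulted: report a short read ...
 */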
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) size_t iov_iter_copy_from_user_atomic(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct iov_iter *i, unsigned long offset, size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) char *kaddr = kmap_atomic(page), *p = kaddr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (unlikely(!page_copy_sane(page, offset, bytes))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) iterate_all_kinds(i, bytes, v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) v.bv_offset, v.bv_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
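/*
 * Usage sketch (illustrative, mirroring the buffered-write pattern):
 * the caller disables page faults so that copying from an unfaulted
 * user page fails fast instead of sleeping, then faults the pages in
 * and retries outside the atomic section:
 *
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 *	if (unlikely(copied == 0))
 *		... fault in the user pages and retry ...
 */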
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static inline void pipe_truncate(struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct pipe_inode_info *pipe = i->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) unsigned int p_tail = pipe->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) unsigned int p_head = pipe->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) unsigned int p_mask = pipe->ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (!pipe_empty(p_head, p_tail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct pipe_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) unsigned int i_head = i->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) size_t off = i->iov_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) buf = &pipe->bufs[i_head & p_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) buf->len = off - buf->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) i_head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) while (p_head != i_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) p_head--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) pipe->head = p_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static void pipe_advance(struct iov_iter *i, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct pipe_inode_info *pipe = i->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (unlikely(i->count < size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) size = i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct pipe_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) unsigned int p_mask = pipe->ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) unsigned int i_head = i->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) size_t off = i->iov_offset, left = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (off) /* make it relative to the beginning of the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) left += off - pipe->bufs[i_head & p_mask].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) buf = &pipe->bufs[i_head & p_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (left <= buf->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) left -= buf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) i_head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) i->head = i_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) i->iov_offset = buf->offset + left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) i->count -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* ... and discard everything past that point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) pipe_truncate(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) void iov_iter_advance(struct iov_iter *i, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (unlikely(iov_iter_is_pipe(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) pipe_advance(i, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (unlikely(iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) i->count -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) iterate_and_advance(i, size, v, 0, 0, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) EXPORT_SYMBOL(iov_iter_advance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) void iov_iter_revert(struct iov_iter *i, size_t unroll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (!unroll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (WARN_ON(unroll > MAX_RW_COUNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) i->count += unroll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (unlikely(iov_iter_is_pipe(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct pipe_inode_info *pipe = i->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) unsigned int p_mask = pipe->ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) unsigned int i_head = i->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) size_t off = i->iov_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) size_t n = off - b->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (unroll < n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) off -= unroll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) unroll -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (!unroll && i_head == i->start_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) i_head--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) b = &pipe->bufs[i_head & p_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) off = b->offset + b->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) i->iov_offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) i->head = i_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) pipe_truncate(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (unlikely(iov_iter_is_discard(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (unroll <= i->iov_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) i->iov_offset -= unroll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) unroll -= i->iov_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (iov_iter_is_bvec(i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) const struct bio_vec *bvec = i->bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) size_t n = (--bvec)->bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) i->nr_segs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (unroll <= n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) i->bvec = bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) i->iov_offset = n - unroll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) unroll -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) } else { /* same logic for iovec and kvec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) const struct iovec *iov = i->iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) size_t n = (--iov)->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) i->nr_segs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (unroll <= n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) i->iov = iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) i->iov_offset = n - unroll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) unroll -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) EXPORT_SYMBOL(iov_iter_revert);
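/*
 * Usage sketch (illustrative): callers that pass the iterator to an
 * operation that can fail after consuming data rewind it by the amount
 * actually consumed ('some_operation' is a hypothetical consumer):
 *
 *	size_t before = iov_iter_count(iter);
 *	ret = some_operation(iter);
 *	if (ret < 0)
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 */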
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * Return the count of just the current iov_iter segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) size_t iov_iter_single_seg_count(const struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (unlikely(iov_iter_is_pipe(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return i->count; // it is a silly place, anyway
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (i->nr_segs == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (unlikely(iov_iter_is_discard(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) else if (iov_iter_is_bvec(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return min(i->count, i->bvec->bv_len - i->iov_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return min(i->count, i->iov->iov_len - i->iov_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) EXPORT_SYMBOL(iov_iter_single_seg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) const struct kvec *kvec, unsigned long nr_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) WARN_ON(direction & ~(READ | WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) i->type = ITER_KVEC | (direction & (READ | WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) i->kvec = kvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) i->nr_segs = nr_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) i->iov_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) i->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) EXPORT_SYMBOL(iov_iter_kvec);
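/*
 * Usage sketch (illustrative): a READ iterator is one that data is
 * copied *into* (e.g. by copy_to_iter()); a WRITE iterator is one that
 * data is copied *from*. Wrapping a kernel buffer in a single-segment
 * kvec:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */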
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) const struct bio_vec *bvec, unsigned long nr_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) WARN_ON(direction & ~(READ | WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) i->type = ITER_BVEC | (direction & (READ | WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) i->bvec = bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) i->nr_segs = nr_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) i->iov_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) i->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) EXPORT_SYMBOL(iov_iter_bvec);
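/*
 * Usage sketch (illustrative): same shape as iov_iter_kvec() above, but
 * the segments are page/offset/length triples, which suits data that
 * already lives in struct page form:
 *
 *	struct bio_vec bv = { .bv_page = page, .bv_offset = 0,
 *			      .bv_len = PAGE_SIZE };
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, PAGE_SIZE);
 */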
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct pipe_inode_info *pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) BUG_ON(direction != READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) i->type = ITER_PIPE | READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) i->pipe = pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) i->head = pipe->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) i->iov_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) i->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) i->start_head = i->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) EXPORT_SYMBOL(iov_iter_pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * iov_iter_discard - Initialise an I/O iterator that discards data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * @i: The iterator to initialise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * @direction: The direction of the transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * @count: The size of the I/O buffer in bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Set up an I/O iterator that just discards everything that's written to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * It's only available as a READ iterator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) BUG_ON(direction != READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) i->type = ITER_DISCARD | READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) i->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) i->iov_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) EXPORT_SYMBOL(iov_iter_discard);
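/*
 * Usage sketch (illustrative): draining 'len' bytes from a source whose
 * actor takes an iov_iter, without storing them anywhere:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, len);
 *	... hand &iter to the actor; copy_to_iter() and friends will
 *	    succeed and simply drop the bytes ...
 */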
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) unsigned long iov_iter_alignment(const struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) unsigned long res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) size_t size = i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (unlikely(iov_iter_is_pipe(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) unsigned int p_mask = i->pipe->ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return size | i->iov_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) iterate_all_kinds(i, size, v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) (res |= (unsigned long)v.iov_base | v.iov_len, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) res |= v.bv_offset | v.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) res |= (unsigned long)v.iov_base | v.iov_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) EXPORT_SYMBOL(iov_iter_alignment);
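/*
 * Usage sketch (illustrative): direct I/O paths use this to reject
 * requests whose base addresses or segment lengths are not aligned to
 * the device's logical block size:
 *
 *	if (iov_iter_alignment(iter) & (bdev_logical_block_size(bdev) - 1))
 *		return -EINVAL;
 */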
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) unsigned long res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) size_t size = i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return ~0U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) iterate_all_kinds(i, size, v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) (res |= (!res ? 0 : (unsigned long)v.iov_base) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) (size != v.iov_len ? size : 0), 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) (size != v.bv_len ? size : 0)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) (res |= (!res ? 0 : (unsigned long)v.iov_base) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) (size != v.iov_len ? size : 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) EXPORT_SYMBOL(iov_iter_gap_alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static inline ssize_t __pipe_get_pages(struct iov_iter *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) size_t maxsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int iter_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) size_t *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct pipe_inode_info *pipe = i->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) unsigned int p_mask = pipe->ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ssize_t n = push_pipe(i, maxsize, &iter_head, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) maxsize = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) n += *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) while (n > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) iter_head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) n -= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return maxsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static ssize_t pipe_get_pages(struct iov_iter *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct page **pages, size_t maxsize, unsigned maxpages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) size_t *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) unsigned int iter_head, npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) size_t capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (!maxsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (!sanity(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) data_start(i, &iter_head, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /* Amount of free space: some of this one + all after this one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) capacity = min(npages, maxpages) * PAGE_SIZE - *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) ssize_t iov_iter_get_pages(struct iov_iter *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct page **pages, size_t maxsize, unsigned maxpages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) size_t *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (maxsize > i->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) maxsize = i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (unlikely(iov_iter_is_pipe(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return pipe_get_pages(i, pages, maxsize, maxpages, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (unlikely(iov_iter_is_discard(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) iterate_all_kinds(i, maxsize, v, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) unsigned long addr = (unsigned long)v.iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (len > maxpages * PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) len = maxpages * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) addr &= ~(PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) n = DIV_ROUND_UP(len, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) res = get_user_pages_fast(addr, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (unlikely(res <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return (res == n ? len : res * PAGE_SIZE) - *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 0;}),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* can't be more than PAGE_SIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *start = v.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) get_page(*pages = v.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return v.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) EXPORT_SYMBOL(iov_iter_get_pages);
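/*
 * Usage sketch (illustrative): direct-I/O style pinning. Note that the
 * iterator is not advanced; the caller advances it by the bytes it
 * actually uses and must drop every page reference afterwards:
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t n = iov_iter_get_pages(iter, pages, bytes, 16, &start);
 *
 *	if (n > 0) {
 *		... do I/O: the data begins 'start' bytes into pages[0] ...
 *		iov_iter_advance(iter, n);
 *		... put_page() each pinned page when the I/O completes ...
 *	}
 */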
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static struct page **get_pages_array(size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) struct page ***pages, size_t maxsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) size_t *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct page **p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) unsigned int iter_head, npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) ssize_t n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!maxsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!sanity(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) data_start(i, &iter_head, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /* Amount of free space: some of this one + all after this one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) n = npages * PAGE_SIZE - *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (maxsize > n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) maxsize = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) p = get_pages_array(npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) n = __pipe_get_pages(i, maxsize, p, iter_head, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (n > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) *pages = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) kvfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) struct page ***pages, size_t maxsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) size_t *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct page **p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (maxsize > i->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) maxsize = i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (unlikely(iov_iter_is_pipe(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return pipe_get_pages_alloc(i, pages, maxsize, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (unlikely(iov_iter_is_discard(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) iterate_all_kinds(i, maxsize, v, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) unsigned long addr = (unsigned long)v.iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) addr &= ~(PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) n = DIV_ROUND_UP(len, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) p = get_pages_array(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) res = get_user_pages_fast(addr, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (unlikely(res <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) kvfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) *pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) *pages = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return (res == n ? len : res * PAGE_SIZE) - *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 0;}),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* can't be more than PAGE_SIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) *start = v.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) *pages = p = get_pages_array(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) get_page(*p = v.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return v.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) EXPORT_SYMBOL(iov_iter_get_pages_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) char *to = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) __wsum sum, next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) size_t off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) sum = *csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) iterate_and_advance(i, bytes, v, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) next = csum_and_copy_from_user(v.iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) (to += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) v.iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) sum = csum_block_add(sum, next, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) off += v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) next ? 0 : v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }), ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) char *p = kmap_atomic(v.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) p + v.bv_offset, v.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) sum, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) kunmap_atomic(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) off += v.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) v.iov_base, v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) sum, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) off += v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) *csum = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) EXPORT_SYMBOL(csum_and_copy_from_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) char *to = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) __wsum sum, next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) size_t off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) sum = *csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (unlikely(i->count < bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) iterate_all_kinds(i, bytes, v, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) next = csum_and_copy_from_user(v.iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) (to += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) v.iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (!next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) sum = csum_block_add(sum, next, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) off += v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }), ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) char *p = kmap_atomic(v.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) p + v.bv_offset, v.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) sum, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) kunmap_atomic(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) off += v.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) v.iov_base, v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) sum, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) off += v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) *csum = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) iov_iter_advance(i, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) EXPORT_SYMBOL(csum_and_copy_from_iter_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct csum_state *csstate = _csstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) const char *from = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) __wsum sum, next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) size_t off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (unlikely(iov_iter_is_pipe(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) sum = csstate->csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) off = csstate->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (unlikely(iov_iter_is_discard(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) WARN_ON(1); /* for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) iterate_and_advance(i, bytes, v, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) v.iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) v.iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) sum = csum_block_add(sum, next, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) off += v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) next ? 0 : v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }), ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) char *p = kmap_atomic(v.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) sum = csum_and_memcpy(p + v.bv_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) (from += v.bv_len) - v.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) v.bv_len, sum, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) kunmap_atomic(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) off += v.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) sum = csum_and_memcpy(v.iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) (from += v.iov_len) - v.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) v.iov_len, sum, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) off += v.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) csstate->csum = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) csstate->off = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) EXPORT_SYMBOL(csum_and_copy_to_iter);
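/*
 * Usage sketch (illustrative): networking copies payload and folds the
 * checksum in one pass; the csum_state carries the running checksum and
 * offset across calls (the initial values here are an assumption):
 *
 *	struct csum_state csdata = { .csum = 0, .off = 0 };
 *	size_t n = csum_and_copy_to_iter(buf, len, &csdata, iter);
 *
 *	... csdata.csum now covers the bytes actually copied ...
 */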
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) #ifdef CONFIG_CRYPTO_HASH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct ahash_request *hash = hashp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) size_t copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) copied = copy_to_iter(addr, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) sg_init_one(&sg, addr, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ahash_request_set_crypt(hash, &sg, NULL, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) crypto_ahash_update(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) EXPORT_SYMBOL(hash_and_copy_to_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int iov_iter_npages(const struct iov_iter *i, int maxpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) size_t size = i->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) int npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (unlikely(iov_iter_is_discard(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (unlikely(iov_iter_is_pipe(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct pipe_inode_info *pipe = i->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) unsigned int iter_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) size_t off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (!sanity(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) data_start(i, &iter_head, &off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) /* some of this one + all after this one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (npages >= maxpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return maxpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) } else iterate_all_kinds(i, size, v, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) unsigned long p = (unsigned long)v.iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) - p / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (npages >= maxpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) return maxpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 0;}),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) npages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (npages >= maxpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return maxpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }),({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) unsigned long p = (unsigned long)v.iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) - p / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (npages >= maxpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return maxpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) EXPORT_SYMBOL(iov_iter_npages);
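/*
 * Usage sketch (illustrative): sizing a page array or bio before
 * pinning pages; the cap (16 here, an arbitrary caller choice) bounds
 * the walk:
 *
 *	int npages = iov_iter_npages(iter, 16);
 */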
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) *new = *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (unlikely(iov_iter_is_pipe(new))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (unlikely(iov_iter_is_discard(new)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (iov_iter_is_bvec(new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return new->bvec = kmemdup(new->bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) new->nr_segs * sizeof(struct bio_vec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /* iovec and kvec have identical layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return new->iov = kmemdup(new->iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) new->nr_segs * sizeof(struct iovec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) EXPORT_SYMBOL(dup_iter);
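
/*
 * Editor's sketch: a hypothetical dup_iter() caller. dup_iter() deep-copies
 * only the segment array (via the kmemdup() above), so the duplicate is
 * released with kfree() on the returned pointer once it is no longer needed.
 */
static int example_save_iter(struct iov_iter *saved, struct iov_iter *src)
{
	const void *segs = dup_iter(saved, src, GFP_KERNEL);

	if (!segs)		/* allocation failure, or a pipe/discard iter */
		return -ENOMEM;
	/* ... consume *saved asynchronously, then release the copy: */
	kfree(segs);
	return 0;
}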
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static int copy_compat_iovec_from_user(struct iovec *iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) const struct iovec __user *uvec, unsigned long nr_segs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) const struct compat_iovec __user *uiov =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) (const struct compat_iovec __user *)uvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) int ret = -EFAULT, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) for (i = 0; i < nr_segs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) compat_uptr_t buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) compat_ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		/* check for compat_size_t not fitting in compat_ssize_t ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) goto uaccess_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) iov[i].iov_base = compat_ptr(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) iov[i].iov_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) uaccess_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) user_access_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
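
/*
 * Editor's note: the unsafe_get_user() pairs above rely on the 32-bit
 * layout of struct compat_iovec from <linux/compat.h>, which is:
 *
 *	struct compat_iovec {
 *		compat_uptr_t	iov_base;
 *		compat_ssize_t	iov_len;
 *	};
 *
 * i.e. two 32-bit words per segment rather than two native-sized words,
 * which is why the pointer is re-cast from struct iovec __user * at the
 * top of the function.
 */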
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static int copy_iovec_from_user(struct iovec *iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) const struct iovec __user *uvec, unsigned long nr_segs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) unsigned long seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) for (seg = 0; seg < nr_segs; seg++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if ((ssize_t)iov[seg].iov_len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
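
/*
 * Editor's note: the (ssize_t) cast above rejects any iov_len larger than
 * SSIZE_MAX with a single comparison; e.g. a 64-bit iov_len of
 * 0xffffffffffffff00 reinterprets as -256 and fails the < 0 test.
 */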
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct iovec *iovec_from_user(const struct iovec __user *uvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) unsigned long nr_segs, unsigned long fast_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct iovec *fast_iov, bool compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct iovec *iov = fast_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * SuS says "The readv() function *may* fail if the iovcnt argument was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * traditionally returned zero for zero segments, so...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (nr_segs == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (nr_segs > UIO_MAXIOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (nr_segs > fast_segs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (!iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) ret = copy_iovec_from_user(iov, uvec, nr_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (iov != fast_iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) kfree(iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
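
/*
 * Editor's sketch: a hypothetical direct caller of iovec_from_user(),
 * showing the ownership rule the function establishes: memory was
 * allocated only if the returned pointer differs from the caller's
 * fast array.
 */
static int example_fetch_iovecs(const struct iovec __user *uvec,
				unsigned long nr_segs)
{
	struct iovec stack[UIO_FASTIOV];
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, ARRAY_SIZE(stack), stack, false);
	if (IS_ERR(iov))
		return PTR_ERR(iov);
	/* ... validate and use iov[0 .. nr_segs - 1] ... */
	if (iov != stack)	/* only the slow path allocated memory */
		kfree(iov);
	return 0;
}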
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) ssize_t __import_iovec(int type, const struct iovec __user *uvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct iov_iter *i, bool compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) ssize_t total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) unsigned long seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct iovec *iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (IS_ERR(iov)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) *iovp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return PTR_ERR(iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * According to the Single Unix Specification we should return EINVAL if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * an element length is < 0 when cast to ssize_t or if the total length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * would overflow the ssize_t return value of the system call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * overflow case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) for (seg = 0; seg < nr_segs; seg++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) ssize_t len = (ssize_t)iov[seg].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!access_ok(iov[seg].iov_base, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (iov != *iovp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) kfree(iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) *iovp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (len > MAX_RW_COUNT - total_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) len = MAX_RW_COUNT - total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) iov[seg].iov_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) total_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) iov_iter_init(i, type, iov, nr_segs, total_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (iov == *iovp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *iovp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) *iovp = iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
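
/*
 * Editor's note (hypothetical numbers): with 4 KiB pages MAX_RW_COUNT is
 * INT_MAX & PAGE_MASK = 0x7ffff000. Given two segments of 0x7fffe000 and
 * 0x10000 bytes, the capping loop above leaves the first untouched,
 * rewrites the second segment's iov_len to 0x1000, and returns exactly
 * MAX_RW_COUNT.
 */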
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * import_iovec() - Copy an array of &struct iovec from userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * into the kernel, check that it is valid, and initialize a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * &struct iov_iter iterator to access it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * @type: One of %READ or %WRITE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * @uvec: Pointer to the userspace array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * @nr_segs: Number of elements in userspace array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  * @fast_segs: Number of elements in the array pointed to by *@iovp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * @iovp: (input and output parameter) Pointer to pointer to (usually small
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * on-stack) kernel array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * @i: Pointer to iterator that will be initialized on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  * then this function places %NULL in *@iovp on return. Otherwise, a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  * array will be allocated and the result placed in *@iovp. This means that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  * the caller may call kfree() on *@iovp regardless of whether the small
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) * on-stack array was used or not (and regardless of whether this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * returns an error or not).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  * Return: Negative error code on error, bytes imported on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ssize_t import_iovec(int type, const struct iovec __user *uvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) unsigned nr_segs, unsigned fast_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct iovec **iovp, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) in_compat_syscall());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) EXPORT_SYMBOL(import_iovec);
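
/*
 * Editor's sketch: the conventional import_iovec() calling pattern,
 * modelled on readv()-style syscalls (names hypothetical). Per the
 * kernel-doc above, kfree(iov) is always safe afterwards: *iovp is set
 * to %NULL when the on-stack array was used, and kfree(NULL) is a no-op.
 */
static ssize_t example_do_readv(const struct iovec __user *uvec,
				unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... hand &iter to the actual I/O path; ret is the byte total ... */
	kfree(iov);
	return ret;
}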
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) int import_single_range(int rw, void __user *buf, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct iovec *iov, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (len > MAX_RW_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) len = MAX_RW_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (unlikely(!access_ok(buf, len)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) iov->iov_base = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) iov->iov_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) iov_iter_init(i, rw, iov, 1, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) EXPORT_SYMBOL(import_single_range);
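
/*
 * Editor's sketch: import_single_range() is the single-buffer analogue of
 * import_iovec(); a hypothetical read()-style caller. Note that @iov must
 * outlive the iterator, since the iterator points into it.
 */
static ssize_t example_do_read(void __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(READ, buf, len, &iov, &iter);
	if (unlikely(ret))
		return ret;
	/* ... perform the I/O against &iter ... */
	return iov_iter_count(&iter);	/* the clamped length, for illustration */
}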
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) int (*f)(struct kvec *vec, void *context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) struct kvec w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) iterate_all_kinds(i, bytes, v, -EINVAL, ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) w.iov_base = kmap(v.bv_page) + v.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) w.iov_len = v.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) err = f(&w, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) kunmap(v.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) err;}), ({
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) w = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) err = f(&w, context);})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) EXPORT_SYMBOL(iov_iter_for_each_range);
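
/*
 * Editor's sketch: a hypothetical iov_iter_for_each_range() callback.
 * This walker only handles kvec- and bvec-backed iterators (the iovec
 * case above yields -EINVAL), and in this version the walk is not cut
 * short by a callback's return value, so callbacks should be cheap and
 * handle every range they are shown.
 */
static int example_sum_range(struct kvec *vec, void *context)
{
	size_t *total = context;

	*total += vec->iov_len;	/* account the mapped span */
	return 0;
}

static size_t example_count_bytes(struct iov_iter *i, size_t bytes)
{
	size_t total = 0;

	iov_iter_for_each_range(i, bytes, example_sum_range, &total);
	return total;
}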