^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/net/sunrpc/xdr.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Generic XDR support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/sunrpc/xdr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/sunrpc/msg_prot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/bvec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <trace/events/sunrpc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) static void _copy_to_pages(struct page **, size_t, const char *, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * XDR functions for basic NFS types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) __be32 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) unsigned int quadlen = XDR_QUADLEN(obj->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) p[quadlen] = 0; /* zero trailing bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) *p++ = cpu_to_be32(obj->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) memcpy(p, obj->data, obj->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) return p + XDR_QUADLEN(obj->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) EXPORT_SYMBOL_GPL(xdr_encode_netobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) __be32 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) obj->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) obj->data = (u8 *) p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) return p + XDR_QUADLEN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) EXPORT_SYMBOL_GPL(xdr_decode_netobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * xdr_encode_opaque_fixed - Encode fixed length opaque data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * @p: pointer to current position in XDR buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * @ptr: pointer to data to encode (or NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * @nbytes: size of data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * Copy the array of data of length nbytes at ptr to the XDR buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * at position p, then align to the next 32-bit boundary by padding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * with zero bytes (see RFC1832).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * Note: if ptr is NULL, only the padding is performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * Returns the updated current XDR buffer position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) if (likely(nbytes != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) unsigned int quadlen = XDR_QUADLEN(nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) unsigned int padding = (quadlen << 2) - nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (ptr != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) memcpy(p, ptr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (padding != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) memset((char *)p + nbytes, 0, padding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) p += quadlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * xdr_encode_opaque - Encode variable length opaque data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * @p: pointer to current position in XDR buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * @ptr: pointer to data to encode (or NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * @nbytes: size of data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * Returns the updated current XDR buffer position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) *p++ = cpu_to_be32(nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return xdr_encode_opaque_fixed(p, ptr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) EXPORT_SYMBOL_GPL(xdr_encode_opaque);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) __be32 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) xdr_encode_string(__be32 *p, const char *string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) return xdr_encode_array(p, string, strlen(string));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) EXPORT_SYMBOL_GPL(xdr_encode_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) __be32 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) xdr_decode_string_inplace(__be32 *p, char **sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) unsigned int *lenp, unsigned int maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) len = be32_to_cpu(*p++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) if (len > maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) *lenp = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) *sp = (char *) p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) return p + XDR_QUADLEN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * @buf: XDR buffer where string resides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * @len: length of string, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) xdr_terminate_string(struct xdr_buf *buf, const u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) char *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) kaddr = kmap_atomic(buf->pages[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) kaddr[buf->page_base + len] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) EXPORT_SYMBOL_GPL(xdr_terminate_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) xdr_buf_pagecount(struct xdr_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) if (!buf->page_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) size_t i, n = xdr_buf_pagecount(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) if (n != 0 && buf->bvec == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) if (!buf->bvec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) buf->bvec[i].bv_page = buf->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) buf->bvec[i].bv_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) buf->bvec[i].bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) xdr_free_bvec(struct xdr_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) kfree(buf->bvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) buf->bvec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * xdr_inline_pages - Prepare receive buffer for a large reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * @xdr: xdr_buf into which reply will be placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * @offset: expected offset where data payload will start, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * @pages: vector of struct page pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * @base: offset in first page where receive should start, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * @len: expected size of the upper layer data payload, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct page **pages, unsigned int base, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) struct kvec *head = xdr->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct kvec *tail = xdr->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) char *buf = (char *)head->iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) unsigned int buflen = head->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) head->iov_len = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) xdr->pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) xdr->page_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) xdr->page_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) tail->iov_base = buf + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) tail->iov_len = buflen - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if ((xdr->page_len & 3) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) tail->iov_len -= sizeof(__be32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) xdr->buflen += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) EXPORT_SYMBOL_GPL(xdr_inline_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * Helper routines for doing 'memmove' like operations on a struct xdr_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * _shift_data_left_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * @pages: vector of pages containing both the source and dest memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * @pgto_base: page vector address of destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * @pgfrom_base: page vector address of source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * Note: the addresses pgto_base and pgfrom_base are both calculated in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * the same way:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * if a memory area starts at byte 'base' in page 'pages[i]',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * then its address is given as (i << PAGE_CACHE_SHIFT) + base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * Alse note: pgto_base must be < pgfrom_base, but the memory areas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * they point to may overlap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) _shift_data_left_pages(struct page **pages, size_t pgto_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) size_t pgfrom_base, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct page **pgfrom, **pgto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) char *vfrom, *vto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) BUG_ON(pgfrom_base <= pgto_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) pgto = pages + (pgto_base >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) pgto_base &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) pgfrom_base &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (pgto_base >= PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) pgto_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) pgto++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) if (pgfrom_base >= PAGE_SIZE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) pgfrom_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) pgfrom++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (copy > (PAGE_SIZE - pgto_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) copy = PAGE_SIZE - pgto_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (copy > (PAGE_SIZE - pgfrom_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) copy = PAGE_SIZE - pgfrom_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) vto = kmap_atomic(*pgto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) if (*pgto != *pgfrom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) vfrom = kmap_atomic(*pgfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) kunmap_atomic(vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) memmove(vto + pgto_base, vto + pgfrom_base, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) flush_dcache_page(*pgto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) kunmap_atomic(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) pgto_base += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) pgfrom_base += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) } while ((len -= copy) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) _shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) struct kvec *tail = buf->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) if (len > tail->iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) len = tail->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) _copy_to_pages(buf->pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) buf->page_base + pgto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) (char *)tail->iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) tail->iov_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) if (tail->iov_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) memmove((char *)tail->iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) tail->iov_base + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) tail->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * _shift_data_right_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * @pages: vector of pages containing both the source and dest memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * @pgto_base: page vector address of destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * @pgfrom_base: page vector address of source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * Note: the addresses pgto_base and pgfrom_base are both calculated in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * the same way:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * if a memory area starts at byte 'base' in page 'pages[i]',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * then its address is given as (i << PAGE_SHIFT) + base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * Also note: pgfrom_base must be < pgto_base, but the memory areas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * they point to may overlap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) _shift_data_right_pages(struct page **pages, size_t pgto_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) size_t pgfrom_base, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct page **pgfrom, **pgto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) char *vfrom, *vto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) BUG_ON(pgto_base <= pgfrom_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) pgto_base += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) pgfrom_base += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) pgto = pages + (pgto_base >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) pgto_base &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) pgfrom_base &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) /* Are any pointers crossing a page boundary? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) if (pgto_base == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) pgto_base = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) pgto--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) if (pgfrom_base == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) pgfrom_base = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) pgfrom--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) if (copy > pgto_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) copy = pgto_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (copy > pgfrom_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) copy = pgfrom_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) pgto_base -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) pgfrom_base -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) vto = kmap_atomic(*pgto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) if (*pgto != *pgfrom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) vfrom = kmap_atomic(*pgfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) kunmap_atomic(vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) memmove(vto + pgto_base, vto + pgfrom_base, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) flush_dcache_page(*pgto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) kunmap_atomic(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) } while ((len -= copy) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) _shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) struct kvec *tail = buf->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) unsigned int tailbuf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) unsigned int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) /* Shift the tail first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (tailbuf_len != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) unsigned int free_space = tailbuf_len - tail->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (len < free_space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) free_space = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (len > free_space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) len = free_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) tail->iov_len += free_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) if (tail->iov_len > len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) char *p = (char *)tail->iov_base + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) memmove(p, tail->iov_base, tail->iov_len - free_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) result += tail->iov_len - free_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) copy = tail->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /* Copy from the inlined pages into the tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) _copy_from_pages((char *)tail->iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) buf->pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) buf->page_base + pgfrom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) result += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * _copy_to_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * @pages: array of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * @pgbase: page vector address of destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * @p: pointer to source data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * @len: length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * Copies data from an arbitrary memory location into an array of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * The copy is assumed to be non-overlapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) struct page **pgto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) char *vto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) pgto = pages + (pgbase >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) pgbase &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) copy = PAGE_SIZE - pgbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) if (copy > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) vto = kmap_atomic(*pgto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) memcpy(vto + pgbase, p, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) kunmap_atomic(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) len -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) pgbase += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (pgbase == PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) flush_dcache_page(*pgto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) pgbase = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) pgto++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) p += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) flush_dcache_page(*pgto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * _copy_from_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * @p: pointer to destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * @pages: array of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * @pgbase: offset of source data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * @len: length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) * Copies data into an arbitrary memory location from an array of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) * The copy is assumed to be non-overlapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct page **pgfrom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) char *vfrom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) pgfrom = pages + (pgbase >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) pgbase &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) copy = PAGE_SIZE - pgbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (copy > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) vfrom = kmap_atomic(*pgfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) memcpy(p, vfrom + pgbase, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) kunmap_atomic(vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) pgbase += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (pgbase == PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) pgbase = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) pgfrom++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) p += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) } while ((len -= copy) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) EXPORT_SYMBOL_GPL(_copy_from_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * _zero_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * @pages: array of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * @pgbase: beginning page vector address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * @len: length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) _zero_pages(struct page **pages, size_t pgbase, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) struct page **page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) char *vpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) size_t zero;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) page = pages + (pgbase >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) pgbase &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) zero = PAGE_SIZE - pgbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) if (zero > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) zero = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) vpage = kmap_atomic(*page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) memset(vpage + pgbase, 0, zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) kunmap_atomic(vpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) flush_dcache_page(*page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) pgbase = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) page++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) } while ((len -= zero) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * xdr_shrink_bufhead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * @buf: xdr_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * @len: bytes to remove from buf->head[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * Shrinks XDR buffer's header kvec buf->head[0] by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * 'len' bytes. The extra data is not lost, but is instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * moved into the inlined pages and/or the tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct kvec *head, *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) size_t copy, offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) unsigned int pglen = buf->page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) unsigned int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) tail = buf->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) head = buf->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) WARN_ON_ONCE(len > head->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (len > head->iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) len = head->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) /* Shift the tail first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (tail->iov_len != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (tail->iov_len > len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) copy = tail->iov_len - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) memmove((char *)tail->iov_base + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) tail->iov_base, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) result += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) /* Copy from the inlined pages into the tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) if (copy > pglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) copy = pglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) offs = len - copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) if (offs >= tail->iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) copy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) else if (copy > tail->iov_len - offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) copy = tail->iov_len - offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) if (copy != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) _copy_from_pages((char *)tail->iov_base + offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) buf->pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) buf->page_base + pglen + offs - len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) result += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) /* Do we also need to copy data from the head into the tail ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (len > pglen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) offs = copy = len - pglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (copy > tail->iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) copy = tail->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) memcpy(tail->iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) (char *)head->iov_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) head->iov_len - offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) result += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /* Now handle pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (pglen != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (pglen > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) _shift_data_right_pages(buf->pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) buf->page_base + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) buf->page_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) pglen - len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (len > pglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) copy = pglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) _copy_to_pages(buf->pages, buf->page_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) (char *)head->iov_base + head->iov_len - len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) result += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) head->iov_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) buf->buflen -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /* Have we truncated the message? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (buf->len > buf->buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) buf->len = buf->buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * @buf: xdr_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * @len: bytes to remove from buf->pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * The extra data is not lost, but is instead moved into buf->tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * Returns the actual number of bytes moved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) unsigned int pglen = buf->page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) unsigned int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (len > buf->page_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) len = buf-> page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) result = _shift_data_right_tail(buf, pglen - len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) buf->page_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) buf->buflen -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* Have we truncated the message? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if (buf->len > buf->buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) buf->len = buf->buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) xdr_shift_buf(struct xdr_buf *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) xdr_shrink_bufhead(buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) EXPORT_SYMBOL_GPL(xdr_shift_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * xdr_stream_pos - Return the current offset from the start of the xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * @xdr: pointer to struct xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) EXPORT_SYMBOL_GPL(xdr_stream_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * xdr_page_pos - Return the current offset from the start of the xdr pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * @xdr: pointer to struct xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) unsigned int xdr_page_pos(const struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) unsigned int pos = xdr_stream_pos(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) WARN_ON(pos < xdr->buf->head[0].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return pos - xdr->buf->head[0].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) EXPORT_SYMBOL_GPL(xdr_page_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * xdr_init_encode - Initialize a struct xdr_stream for sending data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * @xdr: pointer to xdr_stream struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * @buf: pointer to XDR buffer in which to encode data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * @p: current pointer inside XDR buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * @rqst: pointer to controlling rpc_rqst, for debugging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * Note: at the moment the RPC client only passes the length of our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * scratch buffer in the xdr_buf's header kvec. Previously this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * meant we needed to call xdr_adjust_iovec() after encoding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * data. With the new scheme, the xdr_stream manages the details
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * of the buffer length, and takes care of adjusting the kvec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * length for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) struct rpc_rqst *rqst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) struct kvec *iov = buf->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) xdr_set_scratch_buffer(xdr, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) BUG_ON(scratch_len < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) xdr->buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) xdr->iov = iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) BUG_ON(iov->iov_len > scratch_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if (p != xdr->p && p != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) BUG_ON(p < xdr->p || p > xdr->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) len = (char *)p - (char *)xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) xdr->p = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) buf->len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) iov->iov_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) xdr->rqst = rqst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) EXPORT_SYMBOL_GPL(xdr_init_encode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * xdr_commit_encode - Ensure all data is written to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * We handle encoding across page boundaries by giving the caller a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * temporary location to write to, then later copying the data into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * place; xdr_commit_encode does that copying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * Normally the caller doesn't need to call this directly, as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * following xdr_reserve_space will do it. But an explicit call may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * required at the end of encoding, or any other time when the xdr_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * data might be read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) inline void xdr_commit_encode(struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) int shift = xdr->scratch.iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) void *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (shift == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) page = page_address(*xdr->page_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) memcpy(xdr->scratch.iov_base, page, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) memmove(page, page + shift, (void *)xdr->p - page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) xdr->scratch.iov_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) EXPORT_SYMBOL_GPL(xdr_commit_encode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) __be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) int space_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) int frag1bytes, frag2bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (nbytes > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) goto out_overflow; /* Bigger buffers require special handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (xdr->buf->len + nbytes > xdr->buf->buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) goto out_overflow; /* Sorry, we're totally out of space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) frag1bytes = (xdr->end - xdr->p) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) frag2bytes = nbytes - frag1bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (xdr->iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) xdr->iov->iov_len += frag1bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) xdr->buf->page_len += frag1bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) xdr->page_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) xdr->iov = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * If the last encode didn't end exactly on a page boundary, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * next one will straddle boundaries. Encode into the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * page, then copy it back later in xdr_commit_encode. We use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * the "scratch" iov to track any temporarily unused fragment of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * space at the end of the previous buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) xdr->scratch.iov_base = xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) xdr->scratch.iov_len = frag1bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) p = page_address(*xdr->page_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * Note this is where the next encode will start after we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * shifted this one back:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) xdr->p = (void *)p + frag2bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) space_left = xdr->buf->buflen - xdr->buf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) xdr->buf->page_len += frag2bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) xdr->buf->len += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) out_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) trace_rpc_xdr_overflow(xdr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * xdr_reserve_space - Reserve buffer space for sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * @nbytes: number of bytes to reserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * Checks that we have enough buffer space to encode 'nbytes' more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * bytes of data. If so, update the total xdr_buf length, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * adjust the length of the current kvec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) __be32 *p = xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) __be32 *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) xdr_commit_encode(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /* align nbytes on the next 32-bit boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) nbytes += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) nbytes &= ~3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) q = p + (nbytes >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (unlikely(q > xdr->end || q < p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) return xdr_get_next_encode_buffer(xdr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) xdr->p = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (xdr->iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) xdr->iov->iov_len += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) xdr->buf->page_len += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) xdr->buf->len += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) EXPORT_SYMBOL_GPL(xdr_reserve_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * @vec: pointer to a kvec array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * @nbytes: number of bytes to reserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * Reserves enough buffer space to encode 'nbytes' of data and stores the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * determined based on the number of bytes remaining in the current page to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) int thislen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) int v = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) __be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * svcrdma requires every READ payload to start somewhere
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * in xdr->pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (xdr->iov == xdr->buf->head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) xdr->iov = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) xdr->end = xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) while (nbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) thislen = xdr->buf->page_len % PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) p = xdr_reserve_space(xdr, thislen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) vec[v].iov_base = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) vec[v].iov_len = thislen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) v++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) nbytes -= thislen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * xdr_truncate_encode - truncate an encode buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * @len: new length of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Truncates the xdr stream, so that xdr->buf->len == len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * and xdr->p points at offset len from the start of the buffer, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * head, tail, and page lengths are adjusted to correspond.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * If this means moving xdr->p to a different buffer, we assume that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * the end pointer should be set to the end of the current page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * except in the case of the head buffer when we assume the head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * buffer's current length represents the end of the available buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * This is *not* safe to use on a buffer that already has inlined page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * cache pages (as in a zero-copy server read reply), except for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * simple case of truncating from one position in the tail to another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct kvec *head = buf->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct kvec *tail = buf->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) int fraglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (len > buf->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) xdr_commit_encode(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) fraglen = min_t(int, buf->len - len, tail->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) tail->iov_len -= fraglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) buf->len -= fraglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (tail->iov_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) xdr->p = tail->iov_base + tail->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) WARN_ON_ONCE(!xdr->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) WARN_ON_ONCE(!xdr->iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) WARN_ON_ONCE(fraglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) fraglen = min_t(int, buf->len - len, buf->page_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) buf->page_len -= fraglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) buf->len -= fraglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) new = buf->page_base + buf->page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (buf->page_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) xdr->p = page_address(*xdr->page_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) xdr->end = (void *)xdr->p + PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) WARN_ON_ONCE(xdr->iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (fraglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) xdr->end = head->iov_base + head->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* (otherwise assume xdr->end is already set) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) xdr->page_ptr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) head->iov_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) buf->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) xdr->p = head->iov_base + head->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) xdr->iov = buf->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) EXPORT_SYMBOL(xdr_truncate_encode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * xdr_restrict_buflen - decrease available buffer space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * @newbuflen: new maximum number of bytes available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * Adjust our idea of how much space is available in the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * If we've already used too much space in the buffer, returns -1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * If the available space is already smaller than newbuflen, returns 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * and ensures xdr->end is set at most offset newbuflen from the start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * of the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int end_offset = buf->len + left_in_this_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (newbuflen < 0 || newbuflen < buf->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (newbuflen > buf->buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (newbuflen < end_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) xdr->end = (void *)xdr->end + newbuflen - end_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) buf->buflen = newbuflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) EXPORT_SYMBOL(xdr_restrict_buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * @pages: list of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * @base: offset of first byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * @len: length of data in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct kvec *iov = buf->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) buf->pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) buf->page_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) buf->page_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) iov->iov_base = (char *)xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) iov->iov_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) xdr->iov = iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (len & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) unsigned int pad = 4 - (len & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) BUG_ON(xdr->p >= xdr->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) iov->iov_base = (char *)xdr->p + (len & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) iov->iov_len += pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) len += pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) *xdr->p++ = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) buf->buflen += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) buf->len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) EXPORT_SYMBOL_GPL(xdr_write_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (len > iov->iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) len = iov->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) xdr->p = (__be32*)iov->iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) xdr->end = (__be32*)(iov->iov_base + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) xdr->iov = iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) xdr->page_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static int xdr_set_page_base(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unsigned int base, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) unsigned int pgnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) unsigned int maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) unsigned int pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) unsigned int pgend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) maxlen = xdr->buf->page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (base >= maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) maxlen -= base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (len > maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) len = maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) base += xdr->buf->page_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) pgnr = base >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) xdr->page_ptr = &xdr->buf->pages[pgnr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) kaddr = page_address(*xdr->page_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) pgoff = base & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) xdr->p = (__be32*)(kaddr + pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) pgend = pgoff + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (pgend > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) pgend = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) xdr->end = (__be32*)(kaddr + pgend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) xdr->iov = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (xdr_set_page_base(xdr, base, len) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static void xdr_set_next_page(struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) unsigned int newbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) newbase -= xdr->buf->page_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) xdr_set_page(xdr, newbase, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static bool xdr_set_next_buffer(struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (xdr->page_ptr != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) xdr_set_next_page(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) else if (xdr->iov == xdr->buf->head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) xdr_set_page(xdr, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return xdr->p != xdr->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * xdr_init_decode - Initialize an xdr_stream for decoding data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * @xdr: pointer to xdr_stream struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * @buf: pointer to XDR buffer from which to decode data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * @p: current pointer inside XDR buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * @rqst: pointer to controlling rpc_rqst, for debugging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct rpc_rqst *rqst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) xdr->buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) xdr->scratch.iov_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) xdr->scratch.iov_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) xdr->nwords = XDR_QUADLEN(buf->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (buf->head[0].iov_len != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) xdr_set_iov(xdr, buf->head, buf->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) else if (buf->page_len != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) xdr_set_page_base(xdr, 0, buf->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) xdr_set_iov(xdr, buf->head, buf->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (p != NULL && p > xdr->p && xdr->end >= p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) xdr->nwords -= p - xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) xdr->p = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) xdr->rqst = rqst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) EXPORT_SYMBOL_GPL(xdr_init_decode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * @xdr: pointer to xdr_stream struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * @buf: pointer to XDR buffer from which to decode data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * @pages: list of pages to decode into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * @len: length in bytes of buffer in pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct page **pages, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) memset(buf, 0, sizeof(*buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) buf->pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) buf->page_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) buf->buflen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) buf->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) xdr_init_decode(xdr, buf, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) unsigned int nwords = XDR_QUADLEN(nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) __be32 *p = xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) __be32 *q = p + nwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) xdr->p = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) xdr->nwords -= nwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * @xdr: pointer to xdr_stream struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * @buf: pointer to an empty buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * @buflen: size of 'buf'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * The scratch buffer is used when decoding from an array of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * If an xdr_inline_decode() call spans across page boundaries, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * we copy the data into the scratch buffer in order to allow linear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) xdr->scratch.iov_base = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) xdr->scratch.iov_len = buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) __be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) char *cpdest = xdr->scratch.iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) size_t cplen = (char *)xdr->end - (char *)xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (nbytes > xdr->scratch.iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) goto out_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) p = __xdr_inline_decode(xdr, cplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (p == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) memcpy(cpdest, p, cplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (!xdr_set_next_buffer(xdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) goto out_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) cpdest += cplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) nbytes -= cplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) p = __xdr_inline_decode(xdr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (p == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) memcpy(cpdest, p, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return xdr->scratch.iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) out_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) trace_rpc_xdr_overflow(xdr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * xdr_inline_decode - Retrieve XDR data to decode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * @xdr: pointer to xdr_stream struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * @nbytes: number of bytes of data to decode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Check if the input buffer is long enough to enable us to decode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * 'nbytes' more bytes of data starting at the current position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * If so return the current pointer, then update the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * pointer position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) __be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (unlikely(nbytes == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return xdr->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) goto out_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) p = __xdr_inline_decode(xdr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (p != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return xdr_copy_to_scratch(xdr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) out_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) trace_rpc_xdr_overflow(xdr, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) EXPORT_SYMBOL_GPL(xdr_inline_decode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static void xdr_realign_pages(struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct kvec *iov = buf->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) unsigned int cur = xdr_stream_pos(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) unsigned int copied, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* Realign pages to current pointer position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (iov->iov_len > cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) offset = iov->iov_len - cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) copied = xdr_shrink_bufhead(buf, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) trace_rpc_xdr_alignment(xdr, offset, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) xdr->nwords = XDR_QUADLEN(buf->len - cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) unsigned int nwords = XDR_QUADLEN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) unsigned int cur = xdr_stream_pos(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) unsigned int copied, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (xdr->nwords == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) xdr_realign_pages(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (nwords > xdr->nwords) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) nwords = xdr->nwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) len = nwords << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (buf->page_len <= len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) len = buf->page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) else if (nwords < xdr->nwords) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* Truncate page data and move it into the tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) offset = buf->page_len - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) copied = xdr_shrink_pagelen(buf, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) trace_rpc_xdr_alignment(xdr, offset, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) xdr->nwords = XDR_QUADLEN(buf->len - cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * @xdr: pointer to xdr_stream struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * @len: number of bytes of page data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Moves data beyond the current pointer position from the XDR head[] buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * into the page list. Any data that lies beyond current position + "len"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * bytes is moved into the XDR tail[].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * Returns the number of XDR encoded bytes now contained in the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct kvec *iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) unsigned int nwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) unsigned int end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) unsigned int padding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) len = xdr_align_pages(xdr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) nwords = XDR_QUADLEN(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) padding = (nwords << 2) - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) xdr->iov = iov = buf->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /* Compute remaining message length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) end = ((xdr->nwords - nwords) << 2) + padding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (end > iov->iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) end = iov->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * Position current pointer at beginning of tail, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * set remaining message length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) xdr->p = (__be32 *)((char *)iov->iov_base + padding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) xdr->end = (__be32 *)((char *)iov->iov_base + end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) xdr->page_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) xdr->nwords = XDR_QUADLEN(end - padding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) EXPORT_SYMBOL_GPL(xdr_read_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) unsigned int from, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) unsigned int shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if ((offset + length) < offset ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) (offset + length) > buf->page_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) length = buf->page_len - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) xdr_realign_pages(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) from = xdr_page_pos(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) bytes = xdr->nwords << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (length < bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) bytes = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* Move page data to the left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (from > offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) shift = min_t(unsigned int, bytes, buf->page_len - from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) _shift_data_left_pages(buf->pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) buf->page_base + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) buf->page_base + from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) bytes -= shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /* Move tail data into the pages, if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (bytes > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) _shift_data_left_tail(buf, offset + shift, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) xdr->nwords -= XDR_QUADLEN(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) xdr_set_page(xdr, from + length, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) EXPORT_SYMBOL_GPL(xdr_align_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct xdr_buf *buf = xdr->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) unsigned int bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) unsigned int from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) unsigned int truncated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if ((offset + length) < offset ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) (offset + length) > buf->page_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) length = buf->page_len - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) xdr_realign_pages(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) from = xdr_page_pos(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) bytes = xdr->nwords << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (offset + length + bytes > buf->page_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) unsigned int shift = (offset + length + bytes) - buf->page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) truncated = shift - res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) xdr->nwords -= XDR_QUADLEN(truncated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) bytes -= shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Now move the page data over and zero pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (bytes > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) _shift_data_right_pages(buf->pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) buf->page_base + offset + length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) buf->page_base + from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) _zero_pages(buf->pages, buf->page_base + offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) buf->len += length - (from - offset) - truncated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) xdr_set_page(xdr, offset + length, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) EXPORT_SYMBOL_GPL(xdr_expand_hole);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * xdr_enter_page - decode data from the XDR page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * @xdr: pointer to xdr_stream struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * @len: number of bytes of page data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * Moves data beyond the current pointer position from the XDR head[] buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * into the page list. Any data that lies beyond current position + "len"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * bytes is moved into the XDR tail[]. The current pointer is then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * repositioned at the beginning of the first XDR page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) len = xdr_align_pages(xdr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * Position current pointer at beginning of tail, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * set remaining message length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (len != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) xdr_set_page_base(xdr, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) EXPORT_SYMBOL_GPL(xdr_enter_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) buf->head[0] = *iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) buf->tail[0] = empty_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) buf->page_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) buf->buflen = buf->len = iov->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	/* Carve the head: take whatever part of the range overlaps it. */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		/* Range starts past the head: empty head, adjust offset. */
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	/* Carve the page array; page_base/pages locate the first byte. */
	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	/* Carve the tail with whatever part of the range remains. */
	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	/* Leftover base or len means the requested range exceeds @buf. */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * @buf: buf to be trimmed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * @len: number of bytes to reduce "buf" by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * that it's possible that we'll trim less than that amount if the xdr_buf is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * too small, or if (for instance) it's all in the head and the parser has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * already read too far into it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) size_t cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) unsigned int trim = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (buf->tail[0].iov_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) cur = min_t(size_t, buf->tail[0].iov_len, trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) buf->tail[0].iov_len -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) trim -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (!trim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) goto fix_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (buf->page_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) cur = min_t(unsigned int, buf->page_len, trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) buf->page_len -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) trim -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (!trim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) goto fix_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (buf->head[0].iov_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) cur = min_t(size_t, buf->head[0].iov_len, trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) buf->head[0].iov_len -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) trim -= cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) fix_len:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) buf->len -= (len - trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) EXPORT_SYMBOL_GPL(xdr_buf_trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) unsigned int this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) memcpy(obj, subbuf->head[0].iov_base, this_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) len -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) obj += this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) this_len = min_t(unsigned int, len, subbuf->page_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (this_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) len -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) obj += this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) memcpy(obj, subbuf->tail[0].iov_base, this_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /* obj is assumed to point to allocated memory of size at least len: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct xdr_buf subbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) status = xdr_buf_subsegment(buf, &subbuf, base, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) __read_bytes_from_xdr_buf(&subbuf, obj, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) unsigned int this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) memcpy(subbuf->head[0].iov_base, obj, this_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) len -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) obj += this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) this_len = min_t(unsigned int, len, subbuf->page_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (this_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) len -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) obj += this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) memcpy(subbuf->tail[0].iov_base, obj, this_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /* obj is assumed to point to allocated memory of size at least len: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct xdr_buf subbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) status = xdr_buf_subsegment(buf, &subbuf, base, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) __write_bytes_to_xdr_buf(&subbuf, obj, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) __be32 raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) *obj = be32_to_cpu(raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) EXPORT_SYMBOL_GPL(xdr_decode_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) __be32 raw = cpu_to_be32(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) EXPORT_SYMBOL_GPL(xdr_encode_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	/*
	 * Handle the leading 32-bit element count: write it when encoding,
	 * read and sanity-check it against the descriptor and the buffer
	 * length when decoding.
	 */
	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	/* Without a per-element callback there is nothing more to do. */
	if (!desc->xcode)
		return 0;

	/* Total element bytes left to process. */
	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		/* Elements lying wholly in the head are xcoded in place. */
		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		/*
		 * A partial element straddles the head/pages boundary:
		 * stage it in a kmalloc'd bounce buffer ("elem"); "copied"
		 * tracks how many of its bytes have been transferred.
		 */
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		/* Map the first page that contains element data. */
		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			/*
			 * Finish an element left partially processed by the
			 * head (copied != 0), or one split across a page
			 * boundary, via the bounce buffer.
			 */
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			/* Whole elements within this page: xcode in place. */
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			/* Trailing partial element: stage via bounce buffer. */
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			/* More page data to go: remap onto the next page. */
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		/* Complete any element left over from the page array. */
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		/* The remaining elements lie wholly in the tail. */
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct xdr_array2_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (base >= buf->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return xdr_xcode_array2(buf, base, desc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) EXPORT_SYMBOL_GPL(xdr_decode_array2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) struct xdr_array2_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) buf->head->iov_len + buf->page_len + buf->tail->iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) return xdr_xcode_array2(buf, base, desc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) EXPORT_SYMBOL_GPL(xdr_encode_array2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
/*
 * xdr_process_buf - apply @actor to each contiguous piece of a range
 *
 * Walks @len bytes of @buf starting at byte offset @offset, presenting
 * each physically-contiguous chunk (head overlap, each individual page,
 * tail overlap) to @actor as a single-entry scatterlist, with @data
 * passed through.  Stops at the first non-zero return from @actor and
 * propagates it; returns -EINVAL if the range extends past the end of
 * @buf, otherwise 0.
 */
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	/* Head: skip past it entirely, or feed its overlap to @actor. */
	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	/* Page array: feed each page's portion as a separate chunk. */
	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		/* page_base may make the data start mid-page. */
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	/* Tail: whatever part of the range remains. */
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	/* Unconsumed length means the range was out of bounds. */
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * xdr_stream_decode_opaque - Decode variable length opaque
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * @ptr: location to store opaque data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * @size: size of storage buffer @ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * On success, returns size of object stored in *@ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * %-EBADMSG on XDR buffer overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * %-EMSGSIZE on overflow of storage buffer @ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) memcpy(ptr, p, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * @ptr: location to store pointer to opaque data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * @maxlen: maximum acceptable object size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * @gfp_flags: GFP mask to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * On success, returns size of object stored in *@ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * %-EBADMSG on XDR buffer overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * %-EMSGSIZE if the size of the object would exceed @maxlen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * %-ENOMEM on memory allocation failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) size_t maxlen, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) *ptr = kmemdup(p, ret, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (*ptr != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) *ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) * xdr_stream_decode_string - Decode variable length string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * @str: location to store string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * @size: size of storage buffer @str
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * On success, returns length of NUL-terminated string stored in *@str
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * %-EBADMSG on XDR buffer overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * %-EMSGSIZE on overflow of storage buffer @str
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) memcpy(str, p, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) str[ret] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) return strlen(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) *str = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * xdr_stream_decode_string_dup - Decode and duplicate variable length string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * @xdr: pointer to xdr_stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * @str: location to store pointer to string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * @maxlen: maximum acceptable string length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * @gfp_flags: GFP mask to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * On success, returns length of NUL-terminated string stored in *@ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * %-EBADMSG on XDR buffer overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * %-EMSGSIZE if the size of the string would exceed @maxlen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * %-ENOMEM on memory allocation failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) size_t maxlen, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) char *s = kmalloc(ret + 1, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (s != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) memcpy(s, p, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) s[ret] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) *str = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return strlen(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) *str = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);