// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>

#include "socklib.h"

/*
 * Helper structure for copying from an sk_buff.
 */
struct xdr_skb_reader {
	struct sk_buff	*skb;
	unsigned int	offset;
	size_t		count;
	__wsum		csum;
};

typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
				      size_t len);
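
/*
 * Illustrative sketch (not part of the upstream file): a reader
 * descriptor is filled in with the source skb, a starting offset and a
 * byte budget, and an actor is then called one or more times to pull
 * data out of the skb. The variable names below are hypothetical:
 *
 *	struct xdr_skb_reader desc = {
 *		.skb	= skb,
 *		.offset	= 0,
 *		.count	= skb->len,
 *	};
 *	copied = xdr_skb_read_bits(&desc, buffer, buflen);
 */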

/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
static size_t
xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as xdr_skb_read_bits(), but calculates a checksum at the same time.
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 */
static ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	size_t		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
			*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}

/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if (csum_fold(desc.csum))
		return -1;
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev, skb);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
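
/*
 * Example (illustrative sketch, not part of the upstream file): a UDP
 * receive path could use csum_partial_copy_to_xdr() to copy a datagram
 * into an RPC receive buffer while validating the UDP checksum in the
 * same pass; a non-zero return means the copy came up short or the
 * checksum failed. The function name, variable names and the -EPROTO
 * choice below are hypothetical:
 *
 *	static int example_udp_read_skb(struct xdr_buf *rcvbuf,
 *					struct sk_buff *skb)
 *	{
 *		if (csum_partial_copy_to_xdr(rcvbuf, skb))
 *			return -EPROTO;
 *		return 0;
 *	}
 */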

static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t seek)
{
	if (seek)
		iov_iter_advance(&msg->msg_iter, seek);
	return sock_sendmsg(sock, msg);
}

static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
			  struct kvec *vec, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
	return xprt_sendmsg(sock, msg, seek);
}

static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
			      struct xdr_buf *xdr, size_t base)
{
	int err;

	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
	if (err < 0)
		return err;

	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
		      xdr->page_len + xdr->page_base);
	return xprt_sendmsg(sock, msg, base + xdr->page_base);
}

/* Common case:
 * - stream transport
 * - sending from byte 0 of the message
 * - the message is wholly contained in @xdr's head iovec
 */
static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
				 rpc_fraghdr marker, struct kvec *vec,
				 size_t base)
{
	struct kvec iov[2] = {
		[0] = {
			.iov_base	= &marker,
			.iov_len	= sizeof(marker)
		},
		[1] = *vec,
	};
	size_t len = iov[0].iov_len + iov[1].iov_len;

	iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
	return xprt_sendmsg(sock, msg, base);
}
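
/*
 * Illustrative sketch (not part of the upstream file): on a stream
 * transport the record marker passed to the senders above is the length
 * of the XDR message with the high bit set to flag the final fragment,
 * in network byte order. RPC_LAST_STREAM_FRAGMENT and rpc_fraghdr come
 * from <linux/sunrpc/msg_prot.h>; "reqlen" is a hypothetical name for
 * the total length of the message being sent:
 *
 *	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reqlen);
 */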

/**
 * xprt_sock_sendmsg - write an xdr_buf directly to a socket
 * @sock: open socket to send on
 * @msg: socket message metadata
 * @xdr: xdr_buf containing this request
 * @base: starting position in the buffer
 * @marker: stream record marker field
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 * Return values:
 *   On success, returns zero and fills in @sent_p.
 *   %-ENOTSOCK if @sock is not a struct socket.
 */
int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
		      struct xdr_buf *xdr, unsigned int base,
		      rpc_fraghdr marker, unsigned int *sent_p)
{
	unsigned int rmsize = marker ? sizeof(marker) : 0;
	unsigned int remainder = rmsize + xdr->len - base;
	unsigned int want;
	int err = 0;

	*sent_p = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	msg->msg_flags |= MSG_MORE;
	want = xdr->head[0].iov_len + rmsize;
	if (base < want) {
		unsigned int len = want - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		if (rmsize)
			err = xprt_send_rm_and_kvec(sock, msg, marker,
						    &xdr->head[0], base);
		else
			err = xprt_send_kvec(sock, msg, &xdr->head[0], base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= want;
	}

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		err = xprt_send_pagedata(sock, msg, xdr, base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= xdr->page_len;
	}

	if (base >= xdr->tail[0].iov_len)
		return 0;
	msg->msg_flags &= ~MSG_MORE;
	err = xprt_send_kvec(sock, msg, &xdr->tail[0], base);
out:
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}
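
/*
 * Example (illustrative sketch, not part of the upstream file): a caller
 * points a struct msghdr at the destination (or leaves it unaddressed
 * for a connected socket) and hands the whole request to
 * xprt_sock_sendmsg(); on return, *sent_p holds the bytes queued so far,
 * so a transport can retry from a non-zero @base after a partial send.
 * The function name example_send_request() is hypothetical:
 *
 *	static int example_send_request(struct socket *sock,
 *					struct xdr_buf *xdr,
 *					rpc_fraghdr marker)
 *	{
 *		struct msghdr msg = { .msg_flags = 0 };
 *		unsigned int sent = 0;
 *
 *		return xprt_sock_sendmsg(sock, &msg, xdr, 0, marker, &sent);
 *	}
 */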