/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

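/* slab cache for the rds_tcp_incoming structs allocated per incoming message */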
static struct kmem_cache *rds_tcp_incoming_slab;

static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
	skb_queue_purge(&tinc->ti_skb_list);
}

void rds_tcp_inc_free(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rds_tcp_inc_purge(inc);
	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
	kmem_cache_free(rds_tcp_incoming_slab, tinc);
}
/*
 * Copy the message payload from the incoming's queued skbs into the
 * caller's iov_iter, one skb at a time.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_tcp_incoming *tinc;
	struct sk_buff *skb;
	int ret = 0;

	if (!iov_iter_count(to))
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		unsigned long to_copy, skb_off;
		for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
			to_copy = iov_iter_count(to);
			to_copy = min(to_copy, skb->len - skb_off);

			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
				return -EFAULT;

			rds_stats_add(s_copy_to_user, to_copy);
			ret += to_copy;

			if (!iov_iter_count(to))
				goto out;
		}
	}
out:
	return ret;
}

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */

static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

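	/* any portion of the map may have changed; wake all the sleepers */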
	rds_cong_map_updated(map, ~(u64) 0);
}

struct rds_tcp_desc_arg {
	struct rds_conn_path *conn_path;
	gfp_t gfp;
};

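/*
 * Actor callback handed to tcp_read_sock().  Reassembles the RDS header
 * and payload from the TCP byte stream into tc->t_tinc and hands complete
 * messages up to the recv core.  Returns the number of bytes consumed;
 * consuming less than @len tells tcp_read_sock() to stop.
 */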
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_conn_path *cp = arg->conn_path;
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (!tinc) {
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (!tinc) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("alloced tinc %p\n", tinc);
			rds_inc_path_init(&tinc->ti_inc, cp,
					  &cp->cp_conn->c_faddr);
			tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
					local_clock();

			/*
			 * XXX we might be able to use the __ variants when
			 * we've already serialized at a higher level.
			 */
			skb_queue_head_init(&tinc->ti_skb_list);
		}

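		/* first, fill in any remaining bytes of the RDS header */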
		if (left && tc->t_tinc_hdr_rem) {
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
						sizeof(struct rds_header) -
						tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0 len message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
				tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
					local_clock();
			}
		}

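		/* then queue clones of the payload bytes onto the skb list */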
		if (left && tc->t_tinc_data_rem) {
			to_copy = min(tc->t_tinc_data_rem, left);

			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
			if (!clone) {
				desc->error = -ENOMEM;
				goto out;
			}

			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

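		/* header and payload are both complete: deliver the message */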
		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			struct rds_connection *conn = cp->cp_conn;

			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, &conn->c_faddr,
						  &conn->c_laddr,
						  &tinc->ti_inc,
						  arg->gfp);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}

/* the caller has to hold the sock lock */
static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = tc->t_sock;
	read_descriptor_t desc;
	struct rds_tcp_desc_arg arg;

	/* It's like glib in the kernel! */
	arg.conn_path = cp;
	arg.gfp = gfp;
	desc.arg.data = &arg;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
		 desc.error);

	return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * If we fail to allocate we're in trouble; blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv_path(struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker path [%d] tc %p sock %p\n",
		 cp->cp_index, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(cp, GFP_KERNEL);
	release_sock(sock->sk);

	return ret;
}

void rds_tcp_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);
	struct rds_conn_path *cp;
	struct rds_tcp_connection *tc;

	rdsdebug("data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	cp = sk->sk_user_data;
	if (!cp) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = cp->cp_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

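	/*
	 * couldn't allocate with GFP_ATOMIC: punt to the recv worker,
	 * which retries under GFP_KERNEL (see rds_tcp_recv_path)
	 */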
	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
		rcu_read_lock();
		if (!rds_destroy_pending(cp->cp_conn))
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
		rcu_read_unlock();
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);
}

int rds_tcp_recv_init(void)
{
	rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
					sizeof(struct rds_tcp_incoming),
					0, 0, NULL);
	if (!rds_tcp_incoming_slab)
		return -ENOMEM;
	return 0;
}

void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}