Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards.

The source below is the receive side of RDS congestion management (net/rds/cong.c in the kernel tree).

/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>

#include "rds.h"

/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or return -EWOULDBLOCK.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  An application encountering this "back-pressure" is
 * considered a bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As the bitmap changes it is
 * sent through all the connections which terminate in the local address of the
 * bitmap which changed.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to check whether the port it's about to send to is
 * congested or not.
 */
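
/*
 * Illustrative worked example (editorial addition; the real constants are
 * the RDS_CONG_MAP_* definitions in rds.h, and the concrete values below
 * assume a 4K PAGE_SIZE build):
 *
 *	RDS_CONG_MAP_BYTES	= 65536 ports / 8	= 8192 bytes
 *	RDS_CONG_MAP_PAGES	= 8192 / 4096		= 2 pages
 *	RDS_CONG_MAP_PAGE_BITS	= 4096 * 8		= 32768 bits per page
 *
 * Port 40000 therefore lands on page 40000 / 32768 = 1, at bit offset
 * 40000 % 32768 = 7232.  That divide/modulo pair appears verbatim in
 * rds_cong_set_bit(), rds_cong_clear_bit() and rds_cong_test_bit() below.
 */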

/*
 * Interaction with poll is a tad tricky. We want all processes stuck in
 * poll to wake up and check whether a congested destination became uncongested.
 * The really sad thing is we have no idea which destinations the application
 * wants to send to - we don't even know which rds_connections are involved.
 * So until we implement a more flexible rds poll interface, we have to make
 * do with this:
 * We maintain a global counter that is incremented each time a congestion map
 * update is received. Each rds socket tracks this value, and if rds_poll
 * finds that the saved generation number differs from the global generation
 * number, it wakes up the process.
 */
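
/*
 * Illustrative sketch of the consumer side (an assumption about the af_rds
 * poll path, not code from this file): each socket keeps its saved
 * generation, e.g. in rs->rs_cong_track, and does roughly
 *
 *	if (rds_cong_updated_since(&rs->rs_cong_track))
 *		mask |= EPOLLIN;	(exact mask bits vary)
 *
 * i.e. any congestion map update forces pollers to re-check their
 * destinations.  See rds_cong_updated_since() below for the
 * compare-and-update itself.
 */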
static atomic_t		rds_cong_generation = ATOMIC_INIT(0);

/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following
 * circumstances:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 *  It's sadly ordered under the socket callback lock and the connection lock.
 *  Receive paths can mark ports congested from interrupt context so the
 *  lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

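/*
 * Look up the map for @addr in the global tree.  If @insert is non-NULL and
 * no map exists yet for @addr, link @insert into the tree.  Returns the
 * pre-existing map, or NULL if there was none (in which case @insert, if
 * given, is now in the tree).  Callers must hold rds_cong_lock.
 */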
static struct rds_cong_map *rds_cong_tree_walk(const struct in6_addr *addr,
					       struct rds_cong_map *insert)
{
	struct rb_node **p = &rds_cong_tree.rb_node;
	struct rb_node *parent = NULL;
	struct rds_cong_map *map;

	while (*p) {
		int diff;

		parent = *p;
		map = rb_entry(parent, struct rds_cong_map, m_rb_node);

		diff = rds_addr_cmp(addr, &map->m_addr);
		if (diff < 0)
			p = &(*p)->rb_left;
		else if (diff > 0)
			p = &(*p)->rb_right;
		else
			return map;
	}

	if (insert) {
		rb_link_node(&insert->m_rb_node, parent, p);
		rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
	}
	return NULL;
}

/*
 * There is only ever one bitmap for any address.  Connections try to allocate
 * these bitmaps in the process of getting pointers to them.  The bitmaps are
 * only ever freed as the module is removed after all connections have been
 * freed.
 */
static struct rds_cong_map *rds_cong_from_addr(const struct in6_addr *addr)
{
	struct rds_cong_map *map;
	struct rds_cong_map *ret = NULL;
	unsigned long zp;
	unsigned long i;
	unsigned long flags;

	map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
	if (!map)
		return NULL;

	map->m_addr = *addr;
	init_waitqueue_head(&map->m_waitq);
	INIT_LIST_HEAD(&map->m_conn_list);

	for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
		zp = get_zeroed_page(GFP_KERNEL);
		if (zp == 0)
			goto out;
		map->m_page_addrs[i] = zp;
	}

	spin_lock_irqsave(&rds_cong_lock, flags);
	ret = rds_cong_tree_walk(addr, map);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

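	/* A NULL return from the tree walk means our freshly allocated map
	 * was linked into the tree; a non-NULL return means another CPU
	 * raced us and inserted a map for this address first, so we keep
	 * theirs and free ours in the cleanup path below.
	 */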
	if (!ret) {
		ret = map;
		map = NULL;
	}

out:
	if (map) {
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}

	rdsdebug("map %p for addr %pI6c\n", ret, addr);

	return ret;
}

/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_del_init(&conn->c_map_item);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(&conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(&conn->c_faddr);

	if (!(conn->c_lcong && conn->c_fcong))
		return -ENOMEM;

	return 0;
}

void rds_cong_queue_updates(struct rds_cong_map *map)
{
	struct rds_connection *conn;
	unsigned long flags;

	spin_lock_irqsave(&rds_cong_lock, flags);

	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
		struct rds_conn_path *cp = &conn->c_path[0];

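		/* rds_destroy_pending() is checked under the RCU read lock
		 * so that a connection/netns teardown running concurrently
		 * cannot complete while we decide whether to queue work
		 * against this path.
		 */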
		rcu_read_lock();
		if (!test_and_set_bit(0, &conn->c_map_queued) &&
		    !rds_destroy_pending(cp->cp_conn)) {
			rds_stats_inc(s_cong_update_queued);
			/* We cannot inline the call to rds_send_xmit() here
			 * for two reasons (both pertaining to a TCP transport):
			 * 1. When we get here from the receive path, we
			 *    are already holding the sock_lock (held by
			 *    tcp_v4_rcv()).  So inlining calls to
			 *    tcp_setsockopt and/or tcp_sendmsg will deadlock
			 *    when they try to take the sock_lock.
			 * 2. Interrupts are masked so that we can mark the
			 *    port congested from both send and recv paths.
			 *    (See comment around declaration of rds_cong_lock).
			 *    An attempt to take the sock_lock here will
			 *    therefore trigger warnings.
			 * Defer the xmit to rds_send_worker() instead.
			 */
			queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
		}
		rcu_read_unlock();
	}

	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
	rdsdebug("waking map %p for %pI6c\n",
	  map, &map->m_addr);
	rds_stats_inc(s_cong_update_received);
	atomic_inc(&rds_cong_generation);
	if (waitqueue_active(&map->m_waitq))
		wake_up(&map->m_waitq);
	if (waitqueue_active(&rds_poll_waitq))
		wake_up_all(&rds_poll_waitq);

	if (portmask && !list_empty(&rds_cong_monitor)) {
		unsigned long flags;
		struct rds_sock *rs;

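		/* For every socket using the congestion monitor, latch the
		 * overlapping bits of its armed mask into its notify mask,
		 * disarm them, and wake the socket so the update can be
		 * delivered; each armed port fires at most once per re-arm.
		 */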
		read_lock_irqsave(&rds_cong_monitor_lock, flags);
		list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
			spin_lock(&rs->rs_lock);
			rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
			rs->rs_cong_mask &= ~portmask;
			spin_unlock(&rs->rs_lock);
			if (rs->rs_cong_notify)
				rds_wake_sk_sleep(rs);
		}
		read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	}
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);

int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_read(&rds_cong_generation);

	if (likely(*recent == gen))
		return 0;
	*recent = gen;
	return 1;
}

/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption.  This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("setting congestion for %pI6c:%u in map %p\n",
	  &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	set_bit_le(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("clearing congestion for %pI6c:%u in map %p\n",
	  &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	clear_bit_le(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	return test_bit_le(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_add_socket(struct rds_sock *rs)
{
	unsigned long flags;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	if (list_empty(&rs->rs_cong_list))
		list_add(&rs->rs_cong_list, &rds_cong_monitor);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

void rds_cong_remove_socket(struct rds_sock *rs)
{
	unsigned long flags;
	struct rds_cong_map *map;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	list_del_init(&rs->rs_cong_list);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

	/* update congestion map for now-closed port */
	spin_lock_irqsave(&rds_cong_lock, flags);
	map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
		rds_cong_clear_bit(map, rs->rs_bound_port);
		rds_cong_queue_updates(map);
	}
}

int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs)
{
	if (!rds_cong_test_bit(map, port))
		return 0;
	if (nonblock) {
		if (rs && rs->rs_cong_monitor) {
			unsigned long flags;

			/* It would have been nice to have an atomic set_bit on
			 * a uint64_t. */
			spin_lock_irqsave(&rs->rs_lock, flags);
			rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
			spin_unlock_irqrestore(&rs->rs_lock, flags);

			/* Test again - a congestion update may have arrived in
			 * the meantime. */
			if (!rds_cong_test_bit(map, port))
				return 0;
		}
		rds_stats_inc(s_cong_send_error);
		return -ENOBUFS;
	}

	rds_stats_inc(s_cong_send_blocked);
	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

	return wait_event_interruptible(map->m_waitq,
					!rds_cong_test_bit(map, port));
}
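
/*
 * Illustrative caller sketch (an assumption about the send path, not code
 * from this file): a sender would typically gate on the far-end map before
 * queueing a message, roughly
 *
 *	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
 *	if (ret)
 *		return ret;
 *
 * where ret is -ENOBUFS for a non-blocking send to a congested port, or
 * -ERESTARTSYS if a blocking wait was interrupted by a signal.  A blocking
 * sender sleeps on map->m_waitq until an update clears the bit; a
 * non-blocking sender with the congestion monitor enabled arms rs_cong_mask
 * first so a later poll can report the port opening up.
 */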

void rds_cong_exit(void)
{
	struct rb_node *node;
	struct rds_cong_map *map;
	unsigned long i;

	while ((node = rb_first(&rds_cong_tree))) {
		map = rb_entry(node, struct rds_cong_map, m_rb_node);
		rdsdebug("freeing map %p\n", map);
		rb_erase(&map->m_rb_node, &rds_cong_tree);
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}
}

/*
 * Allocate an RDS message containing a congestion update.
 */
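/*
 * Note (derived from rds_message_map_pages(), not stated in the original):
 * the bitmap is not copied into the message; the message's scatterlist is
 * pointed directly at the live map pages, so the transport transmits
 * whatever the map contains at send time.
 */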
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
	struct rds_cong_map *map = conn->c_lcong;
	struct rds_message *rm;

	rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
	if (!IS_ERR(rm))
		rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

	return rm;
}