Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards.

The file below appears to be net/rds/ib_send.c, the RDS (Reliable Datagram Sockets) over-InfiniBand send path.

/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we received the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm because the caller (the event handler) needs it, and
 * currently the only way to recover it is by switching on wr.opcode.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
			       __func__, send->s_wr.opcode);
		break;
	}

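	/* Stamp the descriptor so rds_ib_send_clear_ring() below can tell
	 * that this entry has already been unmapped. */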
	send->s_wr.opcode = 0xdead;

	return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

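		/* The work request id doubles as the ring index; this is how
		 * the completion handler maps a CQE back to its descriptor. */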
		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma[i];

		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring, it never alters which entry is
 * the next to be freed, and that is all this code is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_message *rm = NULL;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int nr_sig = 0;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));
	rds_ib_stats_inc(s_ib_tx_cq_event);

	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);
		rds_ib_ack_send_complete(ic);
		return;
	}

	oldest = rds_ib_ring_oldest(&ic->i_send_ring);

	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

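	/* Reap every descriptor from the oldest outstanding entry up to and
	 * including the one whose wr_id just completed. */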
	for (i = 0; i < completed; i++) {
		send = &ic->i_sends[oldest];
		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rm = rds_ib_send_unmap_op(ic, send, wc->status);

		if (time_after(jiffies, send->s_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);

		if (send->s_op) {
			if (send->s_op == rm->m_final_op) {
				/* If anyone waited for this message to get
				 * flushed out, wake them up now
				 */
				rds_message_unmapped(rm);
			}
			rds_message_put(rm);
			send->s_op = NULL;
		}

		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
	}

	rds_ib_ring_free(&ic->i_send_ring, completed);
	rds_ib_sub_signaled(ic, nr_sig);
	nr_sig = 0;

	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	/* We expect errors as the qp is drained during shutdown */
	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
		rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
				  &conn->c_laddr, &conn->c_faddr,
				  conn->c_tos, wc->status,
				  ib_wc_status_msg(wc->status), wc->vendor_err);
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in an RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
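/* How the two counters share one atomic_t (the accessor macros come
 * from ib.h): send credits live in the low 16 bits of ic->i_credits
 * and posted credits in the high 16 bits, e.g.
 *
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *
 * so a single atomic_cmpxchg() on ic->i_credits updates both counters
 * at once.
 */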
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, the caller wants the posted credits
	 * advertised regardless of whether any send credits are available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}

/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
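		/* Pretend the update was transmitted: returning the header
		 * plus payload length lets the caller advance past this
		 * message without posting anything. */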
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];

		send->s_sge[0].length = sizeof(struct rds_header);
		send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

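		/* The scratch header sits in a long-lived DMA mapping, so
		 * bracket the CPU writes (the copy here, and the credit
		 * update further down) with sync_for_cpu/sync_for_device. */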
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 					   ic->i_send_hdrs_dma[pos],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 					   sizeof(struct rds_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 					   DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		       sizeof(struct rds_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		/* Set up the data, if present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		if (i < work_alloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		    && scat != &rm->data.op_sg[rm->data.op_count]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 			len = min(RDS_FRAG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 				  sg_dma_len(scat) - rm->data.op_dmaoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			send->s_wr.num_sge = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			send->s_sge[1].addr = sg_dma_address(scat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			send->s_sge[1].addr += rm->data.op_dmaoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			send->s_sge[1].length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 			bytes_sent += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			rm->data.op_dmaoff += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 				scat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 				rm->data.op_dmasg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 				rm->data.op_dmaoff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		rds_ib_set_wr_signal_state(ic, send, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		 * Always signal the last one if we're stopping due to flow control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 			rds_ib_set_wr_signal_state(ic, send, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			send->s_wr.send_flags |= IB_SEND_SOLICITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 			nr_sig++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		if (ic->i_flowctl && adv_credits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 			struct rds_header *hdr = ic->i_send_hdrs[pos];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			/* add credit and redo the header checksum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			hdr->h_credit = adv_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			rds_message_make_checksum(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 			adv_credits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			rds_ib_stats_inc(s_ib_tx_credit_updates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		ib_dma_sync_single_for_device(ic->rds_ibdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 					      ic->i_send_hdrs_dma[pos],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 					      sizeof(struct rds_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 					      DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			prev->s_wr.next = &send->s_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		prev = send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		pos = (pos + 1) % ic->i_send_ring.w_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		send = &ic->i_sends[pos];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	} while (i < work_alloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

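	/* i_signaled_sends counts WRs that will raise a send completion;
	 * the connection teardown path waits for it to drain back to zero
	 * before destroying the QP.
	 */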
	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c returned %d\n",
		       &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
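		/* Hand the data op back to the connection so a reconnect
		 * can retransmit it.
		 */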
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue an atomic operation.
 * A simplified version of the rdma case: we always map a single SG entry,
 * just 8 bytes, for the return value of the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	const struct ib_send_wr *failed_wr;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

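	/* Plain and masked atomics both use the masked WR opcodes here; a
	 * plain CSWP/FADD arrives with all-ones (or zero) masks already
	 * filled in when the op was built from the cmsg.
	 */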
	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
		send->s_atomic_wr.swap = op->op_m_cswp.swap;
		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
		send->s_atomic_wr.swap = 0;
		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_atomic_wr.swap_mask = 0;
	}
	send->s_wr.send_flags = 0;
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_atomic_wr.wr.num_sge = 1;
	send->s_atomic_wr.wr.next = NULL;
	send->s_atomic_wr.remote_addr = op->op_remote_addr;
	send->s_atomic_wr.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
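	/* This reference pairs with the put in send completion, once the
	 * atomic's return value has landed in the buffer mapped below.
	 */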

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = sg_dma_address(op->op_sg);
	send->s_sge[0].length = sg_dma_len(op->op_sg);
	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

	rdsdebug("rva %llx rpa %llx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_atomic_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_atomic_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c returned %d\n",
		       &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;
	u64 odp_addr = op->op_odp_addr;
	u32 odp_lkey = 0;

	/* map the op the first time we see it */
	if (!op->op_odp_mr) {
		if (!op->op_mapped) {
			op->op_count =
				ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
					      op->op_nents,
					      (op->op_write) ? DMA_TO_DEVICE :
							       DMA_FROM_DEVICE);
			rdsdebug("ic %p mapping op %p: %d\n", ic, op,
				 op->op_count);
			if (op->op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
			op->op_mapped = 1;
		}
	} else {
		op->op_count = op->op_nents;
		odp_lkey = rds_ib_get_lkey(op->op_odp_mr->r_trans_private);
	}
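
	/* For ODP (on-demand paging) MRs there is nothing to DMA-map: the
	 * sges built below carry the user virtual address and the ODP lkey
	 * instead of a DMA address.
	 */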

	/*
	 * Rather than support partial RDMA reads/writes, we insist on having
	 * enough work requests to post the entire message in one go.
	 */
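	/* e.g. op_count == 70 with max_sge == 32 needs three WRs */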
	i = DIV_ROUND_UP(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

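	/* Build a chain of RDMA WRs, each carrying at most max_sge sg
	 * entries; remote_addr advances linearly since the remote side is
	 * one contiguous MR region.
	 */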
	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		if (!op->op_notify)
			nr_sig += rds_ib_set_wr_signal_state(ic, send,
							     op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_rdma_wr.wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_rdma_wr.wr.num_sge = num_sge;
		}

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = sg_dma_len(scat);
			if (!op->op_odp_mr) {
				send->s_sge[j].addr = sg_dma_address(scat);
				send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
			} else {
				send->s_sge[j].addr = odp_addr;
				send->s_sge[j].lkey = odp_lkey;
			}
			send->s_sge[j].length = len;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			odp_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_rdma_wr.wr,
			send->s_rdma_wr.wr.num_sge,
			send->s_rdma_wr.wr.next);

		prev = send;
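		/* advance to the next send slot, wrapping at the end of the ring */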
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* If the whole op is queued, hang it off the final WR and take a
	 * message reference; send completion will unmap the sg list and
	 * drop that reference.
	 */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr.wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c returned %d\n",
		       &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	}

out:
	return ret;
}

void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}