Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

drivers/infiniband/sw/rxe/rxe.c (every line below was last modified in commit 8f3ce5b39 by kx on 2023-10-28):

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <rdma/rdma_netlink.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"

MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");

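/* set to true once rxe_module_init() has completed; cleared again in
 * rxe_module_exit() below
 */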
bool rxe_initialized;

/* free resources for a rxe device; all objects created for this device
 * must have been destroyed
 */
void rxe_dealloc(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_pool_cleanup(&rxe->uc_pool);
	rxe_pool_cleanup(&rxe->pd_pool);
	rxe_pool_cleanup(&rxe->ah_pool);
	rxe_pool_cleanup(&rxe->srq_pool);
	rxe_pool_cleanup(&rxe->qp_pool);
	rxe_pool_cleanup(&rxe->cq_pool);
	rxe_pool_cleanup(&rxe->mr_pool);
	rxe_pool_cleanup(&rxe->mw_pool);
	rxe_pool_cleanup(&rxe->mc_grp_pool);
	rxe_pool_cleanup(&rxe->mc_elem_pool);

	if (rxe->tfm)
		crypto_free_shash(rxe->tfm);
}

/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
	rxe->max_inline_data			= RXE_MAX_INLINE_DATA;

	rxe->attr.vendor_id			= RXE_VENDOR_ID;
	rxe->attr.max_mr_size			= RXE_MAX_MR_SIZE;
	rxe->attr.page_size_cap			= RXE_PAGE_SIZE_CAP;
	rxe->attr.max_qp			= RXE_MAX_QP;
	rxe->attr.max_qp_wr			= RXE_MAX_QP_WR;
	rxe->attr.device_cap_flags		= RXE_DEVICE_CAP_FLAGS;
	rxe->attr.max_send_sge			= RXE_MAX_SGE;
	rxe->attr.max_recv_sge			= RXE_MAX_SGE;
	rxe->attr.max_sge_rd			= RXE_MAX_SGE_RD;
	rxe->attr.max_cq			= RXE_MAX_CQ;
	rxe->attr.max_cqe			= (1 << RXE_MAX_LOG_CQE) - 1;
	rxe->attr.max_mr			= RXE_MAX_MR;
	rxe->attr.max_pd			= RXE_MAX_PD;
	rxe->attr.max_qp_rd_atom		= RXE_MAX_QP_RD_ATOM;
	rxe->attr.max_res_rd_atom		= RXE_MAX_RES_RD_ATOM;
	rxe->attr.max_qp_init_rd_atom		= RXE_MAX_QP_INIT_RD_ATOM;
	rxe->attr.atomic_cap			= IB_ATOMIC_HCA;
	rxe->attr.max_mcast_grp			= RXE_MAX_MCAST_GRP;
	rxe->attr.max_mcast_qp_attach		= RXE_MAX_MCAST_QP_ATTACH;
	rxe->attr.max_total_mcast_qp_attach	= RXE_MAX_TOT_MCAST_QP_ATTACH;
	rxe->attr.max_ah			= RXE_MAX_AH;
	rxe->attr.max_srq			= RXE_MAX_SRQ;
	rxe->attr.max_srq_wr			= RXE_MAX_SRQ_WR;
	rxe->attr.max_srq_sge			= RXE_MAX_SRQ_SGE;
	rxe->attr.max_fast_reg_page_list_len	= RXE_MAX_FMR_PAGE_LIST_LEN;
	rxe->attr.max_pkeys			= RXE_MAX_PKEYS;
	rxe->attr.local_ca_ack_delay		= RXE_LOCAL_CA_ACK_DELAY;
	addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
			rxe->ndev->dev_addr);

	rxe->max_ucontext			= RXE_MAX_UCONTEXT;
}

/* initialize port attributes */
static void rxe_init_port_param(struct rxe_port *port)
{
	port->attr.state		= IB_PORT_DOWN;
	port->attr.max_mtu		= IB_MTU_4096;
	port->attr.active_mtu		= IB_MTU_256;
	port->attr.gid_tbl_len		= RXE_PORT_GID_TBL_LEN;
	port->attr.port_cap_flags	= RXE_PORT_PORT_CAP_FLAGS;
	port->attr.max_msg_sz		= RXE_PORT_MAX_MSG_SZ;
	port->attr.bad_pkey_cntr	= RXE_PORT_BAD_PKEY_CNTR;
	port->attr.qkey_viol_cntr	= RXE_PORT_QKEY_VIOL_CNTR;
	port->attr.pkey_tbl_len		= RXE_PORT_PKEY_TBL_LEN;
	port->attr.lid			= RXE_PORT_LID;
	port->attr.sm_lid		= RXE_PORT_SM_LID;
	port->attr.lmc			= RXE_PORT_LMC;
	port->attr.max_vl_num		= RXE_PORT_MAX_VL_NUM;
	port->attr.sm_sl		= RXE_PORT_SM_SL;
	port->attr.subnet_timeout	= RXE_PORT_SUBNET_TIMEOUT;
	port->attr.init_type_reply	= RXE_PORT_INIT_TYPE_REPLY;
	port->attr.active_width		= RXE_PORT_ACTIVE_WIDTH;
	port->attr.active_speed		= RXE_PORT_ACTIVE_SPEED;
	port->attr.phys_state		= RXE_PORT_PHYS_STATE;
	port->mtu_cap			= ib_mtu_enum_to_int(IB_MTU_256);
	port->subnet_prefix		= cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
}

/* initialize port state, note IB convention that HCA ports are always
 * numbered from 1
 */
static void rxe_init_ports(struct rxe_dev *rxe)
{
	struct rxe_port *port = &rxe->port;

	rxe_init_port_param(port);
	addrconf_addr_eui48((unsigned char *)&port->port_guid,
			    rxe->ndev->dev_addr);
	spin_lock_init(&port->port_lock);
}

/* init pools of managed objects */
static int rxe_init_pools(struct rxe_dev *rxe)
{
	int err;

	err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
			    rxe->max_ucontext);
	if (err)
		goto err1;

	err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
			    rxe->attr.max_pd);
	if (err)
		goto err2;

	err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
			    rxe->attr.max_ah);
	if (err)
		goto err3;

	err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
			    rxe->attr.max_srq);
	if (err)
		goto err4;

	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
			    rxe->attr.max_qp);
	if (err)
		goto err5;

	err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
			    rxe->attr.max_cq);
	if (err)
		goto err6;

	err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
			    rxe->attr.max_mr);
	if (err)
		goto err7;

	err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
			    rxe->attr.max_mw);
	if (err)
		goto err8;

	err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
			    rxe->attr.max_mcast_grp);
	if (err)
		goto err9;

	err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
			    rxe->attr.max_total_mcast_qp_attach);
	if (err)
		goto err10;

	return 0;

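	/* unwind pool creation in reverse order */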
err10:
	rxe_pool_cleanup(&rxe->mc_grp_pool);
err9:
	rxe_pool_cleanup(&rxe->mw_pool);
err8:
	rxe_pool_cleanup(&rxe->mr_pool);
err7:
	rxe_pool_cleanup(&rxe->cq_pool);
err6:
	rxe_pool_cleanup(&rxe->qp_pool);
err5:
	rxe_pool_cleanup(&rxe->srq_pool);
err4:
	rxe_pool_cleanup(&rxe->ah_pool);
err3:
	rxe_pool_cleanup(&rxe->pd_pool);
err2:
	rxe_pool_cleanup(&rxe->uc_pool);
err1:
	return err;
}

/* initialize rxe device state */
static int rxe_init(struct rxe_dev *rxe)
{
	int err;

	/* init default device parameters */
	rxe_init_device_param(rxe);

	rxe_init_ports(rxe);

	err = rxe_init_pools(rxe);
	if (err)
		return err;

	/* init pending mmap list */
	spin_lock_init(&rxe->mmap_offset_lock);
	spin_lock_init(&rxe->pending_lock);
	INIT_LIST_HEAD(&rxe->pending_mmaps);

	mutex_init(&rxe->usdev_lock);

	return 0;
}

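/* derive the port's IB MTU from the netdev MTU, clamping the result to the
 * [IB_MTU_256, IB_MTU_4096] range; IB_MTU_256 is used as a fallback when
 * eth_mtu_int_to_enum() cannot map ndev_mtu to a valid IB MTU
 */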
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
	struct rxe_port *port = &rxe->port;
	enum ib_mtu mtu;

	mtu = eth_mtu_int_to_enum(ndev_mtu);

	/* Make sure that the new MTU is in range */
	mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;

	port->attr.active_mtu = mtu;
	port->mtu_cap = ib_mtu_enum_to_int(mtu);
}

/* called by the ifc layer to create a new rxe device.
 * The caller should allocate memory for rxe by calling ib_alloc_device.
 */
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
{
	int err;

	err = rxe_init(rxe);
	if (err)
		return err;

	rxe_set_mtu(rxe, mtu);

	return rxe_register_device(rxe, ibdev_name);
}

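/* "link add" handler invoked through the rdma netlink interface (see
 * rxe_link_ops below): it refuses VLAN devices and net_devices that already
 * have an rxe device bound to them, then hands off to rxe_net_add()
 */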
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
{
	struct rxe_dev *exists;
	int err = 0;

	if (is_vlan_dev(ndev)) {
		pr_err("rxe creation allowed on top of a real device only\n");
		err = -EPERM;
		goto err;
	}

	exists = rxe_get_dev_from_net(ndev);
	if (exists) {
		ib_device_put(&exists->ib_dev);
		pr_err("already configured on %s\n", ndev->name);
		err = -EEXIST;
		goto err;
	}

	err = rxe_net_add(ibdev_name, ndev);
	if (err) {
		pr_err("failed to add %s\n", ndev->name);
		goto err;
	}
err:
	return err;
}

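/* registers "rxe" as an rdma link type so userspace can create devices by
 * name; rxe_newlink() above is the creation callback
 */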
static struct rdma_link_ops rxe_link_ops = {
	.type = "rxe",
	.newlink = rxe_newlink,
};

static int __init rxe_module_init(void)
{
	int err;

	err = rxe_net_init();
	if (err)
		return err;

	rdma_link_register(&rxe_link_ops);
	rxe_initialized = true;
	pr_info("loaded\n");
	return 0;
}

static void __exit rxe_module_exit(void)
{
	rdma_link_unregister(&rxe_link_ops);
	ib_unregister_driver(RDMA_DRIVER_RXE);
	rxe_net_exit();

	rxe_initialized = false;
	pr_info("unloaded\n");
}

late_initcall(rxe_module_init);
module_exit(rxe_module_exit);

MODULE_ALIAS_RDMA_LINK("rxe");
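
/* Usage sketch, assuming the iproute2 "rdma" tool is installed: once this
 * module is loaded, a soft-RoCE device can be bound to an Ethernet
 * interface through the "rxe" link type registered above, e.g.
 *
 *   rdma link add rxe0 type rxe netdev eth0
 *   rdma link show
 *
 * "rxe0" and "eth0" are placeholder names; rxe_newlink() rejects VLAN
 * devices and interfaces that already have an rxe device attached.
 */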