Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)

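/*
 * Maps a slave-local CM ID (sl_cm_id) to the paravirtualized CM ID
 * (pv_cm_id) seen on the wire.  Entries are indexed both by
 * (sl_cm_id, slave_id) in the sl_id_map rbtree and by pv_cm_id in the
 * pv_id_table xarray, and are freed by the delayed work in "timeout".
 */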
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

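/*
 * Remembers which slave an incoming connection request came from, so a
 * late REJ (reason "timeout") can still be routed to that slave.
 * Entries expire via the delayed work in "timeout".
 */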
struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct xarray *xa_rej_tmout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

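/*
 * SIDR REQ/REP MADs carry a single request_id instead of separate
 * local/remote communication IDs, so the accessors below pick the
 * right field based on the MAD attribute ID.
 */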
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* The id_map_lock must be held before calling this function. */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

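/* Delayed-work handler: drop an expired mapping from both databases and free it. */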
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

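/* Insert an entry into the sl_id_map rbtree, keyed by sl_cm_id and then slave_id. */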
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

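/*
 * Create a new mapping for (slave_id, sl_cm_id): a pv_cm_id is assigned
 * cyclically by the xarray, and the entry is linked into the rbtree and
 * the per-device cm_list.
 */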
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow: propagate the xarray allocation error */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(ret);
}

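/*
 * Look up a mapping by pv_cm_id when *pv_cm_id != -1, otherwise by the
 * (slave_id, sl_cm_id) pair, in which case *pv_cm_id is filled in.
 */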
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

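/* Arm (or re-arm) the delayed deletion of a mapping, unless the device is going down. */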
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* Make sure that there is no schedule inside the scheduled work. */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

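/*
 * Multiplex a CM MAD sent by a slave toward the wire: replace the
 * slave-local comm ID with the paravirtualized pv_cm_id, creating a
 * mapping for connection-establishing MADs and scheduling cleanup on
 * DREQ.
 */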
#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} id_map_alloc failed\n",
				__func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

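/* Delayed-work handler: remove an expired rej_tmout entry from the xarray and free it. */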
static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);

	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}

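/*
 * Record which slave a request's remote pv_cm_id belongs to; if an
 * entry already exists, its expiry is simply pushed back.
 */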
static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	struct rej_tmout_entry *old;
	int ret = 0;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (item) {
		if (xa_err(item))
			ret = xa_err(item);
		else
			/* If a retry, adjust delayed work */
			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;

	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
		pr_debug(
			"Non-null old entry (%p) or error (%d) when inserting\n",
			old, xa_err(old));
		kfree(item);
		return xa_err(old);
	}

	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_or_exists:
	xa_unlock(&sriov->xa_rej_tmout);
	return ret;
}

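/* Return the slave recorded for rem_pv_cm_id, or a negative errno if none. */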
static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;
	int slave;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, xa_err(item));
		slave = !item ? -ENOENT : xa_err(item);
	} else {
		slave = item->slave;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	return slave;
}

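/*
 * Demultiplex a CM MAD arriving from the wire: resolve the destination
 * slave (by GID for new REQs, by the cached mapping otherwise) and
 * restore the slave-local comm ID before the MAD is forwarded.
 */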
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);

		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

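/* Initialize the per-device paravirt CM state: locks, cm_list, rbtree and xarrays. */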
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
}

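/*
 * Force-expire rej_tmout entries for the given slave (all slaves when
 * slave < 0) and flush the delayed work that frees them.
 */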
static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct rej_tmout_entry *item;
	bool flush_needed = false;
	unsigned long id;
	int cnt = 0;

	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(system_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_scheduled_work();
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}

	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave.  Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_xa_cleanup(sriov, slave);
}