/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for IB environment
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef _SMC_IB_H
#define _SMC_IB_H

#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <net/smc.h>

#define SMC_MAX_PORTS		2	/* Max # of ports */
#define SMC_GID_SIZE		sizeof(union ib_gid)

#define SMC_IB_MAX_SEND_SGE	2

struct smc_ib_devices {			/* list of smc ib devices definition */
	struct list_head	list;
	struct mutex		mutex;	/* protects list of smc ib devices */
};

extern struct smc_ib_devices	smc_ib_devices; /* list of smc ib devices */

struct smc_ib_device {				/* ib-device infos for smc */
	struct list_head	list;
	struct ib_device	*ibdev;
	struct ib_port_attr	pattr[SMC_MAX_PORTS];	/* ib dev. port attrs */
	struct ib_event_handler	event_handler;	/* global ib_event handler */
	struct ib_cq		*roce_cq_send;	/* send completion queue */
	struct ib_cq		*roce_cq_recv;	/* recv completion queue */
	struct tasklet_struct	send_tasklet;	/* called by send cq handler */
	struct tasklet_struct	recv_tasklet;	/* called by recv cq handler */
	char			mac[SMC_MAX_PORTS][ETH_ALEN];
						/* mac address per port */
	u8			pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN];
						/* pnetid per port */
	bool			pnetid_by_user[SMC_MAX_PORTS];
						/* pnetid defined by user? */
	u8			initialized : 1; /* ib dev CQ, evthdl done */
	struct work_struct	port_event_work;
	unsigned long		port_event_mask;
	DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS);
	atomic_t		lnk_cnt;	/* number of links on ibdev */
	wait_queue_head_t	lnks_deleted;	/* wait for removal of all links */
	struct mutex		mutex;		/* protect dev setup+cleanup */
};
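
/*
 * Illustrative sketch only (not part of the SMC sources): a helper showing
 * how the per-port fields above might be consumed, here counting the ports
 * whose pnetid was defined by the user. The helper name is made up for the
 * example.
 */
static inline int smc_ib_example_user_pnetid_ports(struct smc_ib_device *smcibdev)
{
	int i, cnt = 0;

	for (i = 0; i < SMC_MAX_PORTS; i++)
		if (smcibdev->pnetid_by_user[i])	/* per-port flag defined above */
			cnt++;
	return cnt;
}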

struct smc_buf_desc;
struct smc_link;

int smc_ib_register_client(void) __init;
void smc_ib_unregister_client(void);
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction);
void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction);
void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
int smc_ib_create_protection_domain(struct smc_link *lnk);
void smc_ib_destroy_queue_pair(struct smc_link *lnk);
int smc_ib_create_queue_pair(struct smc_link *lnk);
int smc_ib_ready_link(struct smc_link *lnk);
int smc_ib_modify_qp_rts(struct smc_link *lnk);
int smc_ib_modify_qp_reset(struct smc_link *lnk);
int smc_ib_modify_qp_error(struct smc_link *lnk);
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx);
void smc_ib_put_memory_region(struct ib_mr *mr);
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction);
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction);
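
/*
 * Illustrative sketch only, not code from the SMC tree: the expected pairing
 * of the two sync helpers above around CPU access to a buffer that stays
 * mapped for DMA. The helper name and the access callback are assumptions
 * made up for this example.
 */
static inline void smc_ib_example_cpu_access(struct smc_link *lnk,
					     struct smc_buf_desc *buf_slot,
					     void (*access)(struct smc_buf_desc *))
{
	/* give the buffer to the CPU before reading/writing it */
	smc_ib_sync_sg_for_cpu(lnk, buf_slot, DMA_FROM_DEVICE);
	access(buf_slot);
	/* hand ownership back to the device when done */
	smc_ib_sync_sg_for_device(lnk, buf_slot, DMA_FROM_DEVICE);
}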
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index);
bool smc_ib_is_valid_local_systemid(void);
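
/*
 * Illustrative usage sketch only, not taken from the SMC implementation:
 * a rough outline of how the link-level helpers above might be combined to
 * bring a link up (protection domain, queue pair, then readying the QP),
 * with minimal unwinding on error. The function name is made up for the
 * example.
 */
static inline int smc_ib_example_link_bringup(struct smc_link *lnk)
{
	int rc;

	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		return rc;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto out_pd;
	rc = smc_ib_ready_link(lnk);	/* prepare the QP for traffic */
	if (rc)
		goto out_qp;
	return 0;

out_qp:
	smc_ib_destroy_queue_pair(lnk);
out_pd:
	smc_ib_dealloc_protection_domain(lnk);
	return rc;
}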
#endif