/*
 * Copyright (c) 2016 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"

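/* Transition an MR between FRMR states with cmpxchg so that only one
 * CPU wins a given transition.  When an MR leaves FRMR_IS_INUSE, also
 * drop the connection's fast-reg in-use count and wake up anyone
 * waiting on rds_ib_ring_empty_wait.
 */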
static inline void
rds_transition_frwr_state(struct rds_ib_mr *ibmr,
			  enum rds_ib_fr_state old_state,
			  enum rds_ib_fr_state new_state)
{
	if (cmpxchg(&ibmr->u.frmr.fr_state,
		    old_state, new_state) == old_state &&
	    old_state == FRMR_IS_INUSE) {
		/* enforce order of ibmr->u.frmr.fr_state update
		 * before decrementing i_fastreg_inuse_count
		 */
		smp_mb__before_atomic();
		atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
		if (waitqueue_active(&rds_ib_ring_empty_wait))
			wake_up(&rds_ib_ring_empty_wait);
	}
}

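/* Allocate an MR from the 8K or 1M pool, preferring to reuse one that
 * is already sitting on the pool's clean list.
 */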
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
					   int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	frmr = &ibmr->u.frmr;
	frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
			       pool->max_pages);
	if (IS_ERR(frmr->mr)) {
		pr_warn("RDS/IB: %s failed to allocate MR\n", __func__);
		err = PTR_ERR(frmr->mr);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	if (atomic_read(&pool->item_count) > pool->max_items_soft)
		pool->max_items_soft = pool->max_items;

	frmr->fr_state = FRMR_IS_FREE;
	init_waitqueue_head(&frmr->fr_inv_done);
	init_waitqueue_head(&frmr->fr_reg_done);
	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

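/* Return an MR to its pool: on the drop list when the caller asks for
 * it to be dropped, otherwise on the free list for reuse.  Kick the
 * pool's flush worker once too many pages are pinned.
 */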
static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (drop)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
}

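/* Map the MR's pages into the HCA and post an IB_WR_REG_MR work
 * request for them, then wait for the registration to complete.
 * Fails with -EBUSY if the MR is not in the FRMR_IS_FREE state.
 */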
static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct ib_reg_wr reg_wr;
	int ret, off = 0;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
				&off, PAGE_SIZE);
	if (unlikely(ret != ibmr->sg_dma_len))
		return ret < 0 ? ret : -EINVAL;

	if (cmpxchg(&frmr->fr_state,
		    FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
		return -EBUSY;

	atomic_inc(&ibmr->ic->i_fastreg_inuse_count);

	/* Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR. The key used is a rolling 8-bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
	frmr->fr_reg = true;

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
	if (unlikely(ret)) {
		/* Failure here can be because of -ENOMEM as well */
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);

		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		if (printk_ratelimit())
			pr_warn("RDS/IB: %s returned error(%d)\n",
				__func__, ret);
		goto out;
	}

	/* Wait for the registration to complete in order to prevent an
	 * invalid access error resulting from the memory region being
	 * accessed while its registration is still pending.
	 */
	wait_event(frmr->fr_reg_done, !frmr->fr_reg);

out:
	return ret;
}

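/* DMA-map a scatterlist into an MR and register it with the HCA.
 * Only the first entry may start, and only the last entry may end, off
 * a page boundary; any other misalignment fails the mapping.
 */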
static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
			   struct rds_ib_mr_pool *pool,
			   struct rds_ib_mr *ibmr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	int i;
	u32 len;
	int ret = 0;

	/* Tear down the old ibmr mapping here and fill it in with the
	 * new sg values
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = sg;
	ibmr->sg_len = sg_len;
	/* check for a leaked DMA mapping before overwriting sg_dma_len */
	WARN_ON(ibmr->sg_dma_len);
	ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
					 DMA_BIDIRECTIONAL);
	if (unlikely(!ibmr->sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	frmr->sg_byte_len = 0;
	frmr->dma_npages = 0;
	len = 0;

	ret = -EINVAL;
	for (i = 0; i < ibmr->sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
		u64 dma_addr = sg_dma_address(&ibmr->sg[i]);

		frmr->sg_byte_len += dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < ibmr->sg_dma_len - 1)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		len += dma_len;
	}
	frmr->dma_npages += len >> PAGE_SHIFT;

	if (frmr->dma_npages > ibmr->pool->max_pages) {
		ret = -EMSGSIZE;
		goto out_unmap;
	}

	ret = rds_ib_post_reg_frmr(ibmr);
	if (ret)
		goto out_unmap;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

	return ret;

out_unmap:
	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
			DMA_BIDIRECTIONAL);
	ibmr->sg_dma_len = 0;
	return ret;
}

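/* Post an IB_WR_LOCAL_INV work request to invalidate the MR's rkey,
 * then wait until the MR has left the FRMR_IS_INUSE state.
 */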
static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
	struct ib_send_wr *s_wr;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
	int ret = -EINVAL;

	if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
		goto out;

	if (frmr->fr_state != FRMR_IS_INUSE)
		goto out;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	frmr->fr_inv = true;
	s_wr = &frmr->fr_wr;

	memset(s_wr, 0, sizeof(*s_wr));
	s_wr->wr_id = (unsigned long)(void *)ibmr;
	s_wr->opcode = IB_WR_LOCAL_INV;
	s_wr->ex.invalidate_rkey = frmr->mr->rkey;
	s_wr->send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
	if (unlikely(ret)) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		frmr->fr_inv = false;
		/* enforce order of frmr->fr_inv update
		 * before incrementing i_fastreg_wrs
		 */
		smp_mb__before_atomic();
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
		goto out;
	}

	/* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order to
	 * 1) avoid silly bouncing between "clean_list" and "drop_list"
	 *    triggered by "rds_ib_reg_frmr", as it releases frmr regions
	 *    whose state is not "FRMR_IS_FREE" right away, and
	 * 2) prevent an invalid access error in a race between a pending
	 *    "IB_WR_LOCAL_INV" operation and the teardown ("dma_unmap_sg",
	 *    "put_page") and de-registration ("ib_dereg_mr") of the
	 *    corresponding memory region.
	 */
	wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);

out:
	return ret;
}

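/* Completion handler for both registration and invalidation work
 * requests.  A failed completion marks the MR stale and drops the
 * connection so it can reconnect; in any case the waiters in
 * rds_ib_post_reg_frmr() and rds_ib_post_inv() are woken and the WR
 * credit is returned.
 */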
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (wc->status != IB_WC_SUCCESS) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		if (rds_conn_up(ic->conn))
			rds_ib_conn_error(ic->conn,
					  "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
					  &ic->conn->c_laddr,
					  &ic->conn->c_faddr,
					  wc->status,
					  ib_wc_status_msg(wc->status),
					  wc->vendor_err);
	}

	if (frmr->fr_inv) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
		frmr->fr_inv = false;
		wake_up(&frmr->fr_inv_done);
	}

	if (frmr->fr_reg) {
		frmr->fr_reg = false;
		wake_up(&frmr->fr_reg_done);
	}

	/* enforce order of frmr->{fr_reg,fr_inv} update
	 * before incrementing i_fastreg_wrs
	 */
	smp_mb__before_atomic();
	atomic_inc(&ic->i_fastreg_wrs);
}

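/* Invalidate and tear down a list of MRs.  MRs are deregistered and
 * freed until "goal" of them have been freed; stale MRs are freed
 * regardless of the goal, while MRs still in use stay on the list.
 */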
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
		       unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_frmr *frmr;
	int ret = 0, ret2;
	unsigned int freed = *nfreed;

	/* Post an invalidation request for every mapped MR on the list */
	list_for_each_entry(ibmr, list, unmap_list) {
		if (ibmr->sg_dma_len) {
			ret2 = rds_ib_post_inv(ibmr);
			if (ret2 && !ret)
				ret = ret2;
		}
	}

	if (ret)
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		*unpinned += ibmr->sg_len;
		frmr = &ibmr->u.frmr;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
			/* Don't de-allocate if the MR is not free yet */
			if (frmr->fr_state == FRMR_IS_INUSE)
				continue;

			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			if (frmr->mr)
				ib_dereg_mr(frmr->mr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

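/* Register a scatterlist with the HCA and return an MR whose rkey can
 * be handed to the peer.  Allocation loops until an MR in the
 * FRMR_IS_FREE state is found; any MR that is not free is put back on
 * the pool's drop list.
 */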
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
				  struct rds_ib_connection *ic,
				  struct scatterlist *sg,
				  unsigned long nents, u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int ret;

	if (!ic) {
		/* TODO: Add FRWR support for RDS_GET_MR using proxy qp */
		return ERR_PTR(-EOPNOTSUPP);
	}

	do {
		if (ibmr)
			rds_ib_free_frmr(ibmr, true);
		ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
		if (IS_ERR(ibmr))
			return ibmr;
		frmr = &ibmr->u.frmr;
	} while (frmr->fr_state != FRMR_IS_FREE);

	ibmr->ic = ic;
	ibmr->device = rds_ibdev;
	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
	if (ret == 0) {
		*key = frmr->mr->rkey;
	} else {
		rds_ib_free_frmr(ibmr, false);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}

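/* Queue an MR for release: stale MRs go on the pool's drop list to be
 * invalidated, clean ones on the free list for reuse.
 */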
void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (frmr->fr_state == FRMR_IS_STALE)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}