/*
 * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket? hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * Get the number of pages spanned by a vec from the page indices that
 * the start and end addresses fall in.
 *
 * Returns 0 if the vec is invalid: either the byte count makes the
 * address wrap, or it overflows an unsigned int (the limit imposed by
 * the 'length' member of 'struct scatterlist').
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
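
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): a vec with
 * addr == 0x1ff8 and bytes == 16 ends at 0x2008, touching page indices
 * 1 and 2, so rds_pages_in_vec() returns 2 even though only 16 bytes
 * are mapped.
 */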

static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		kref_get(&insert->r_kref);
	}
	return NULL;
}
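
/*
 * Usage sketch (illustrative; callers hold rs->rs_rdma_lock): passing
 * insert == NULL performs a pure lookup and returns the matching MR or
 * NULL. Passing a new MR links it into the tree, takes a reference on
 * behalf of the tree and returns NULL; if the key already exists,
 * nothing is inserted and the existing MR is returned instead:
 *
 *	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, key, NULL);
 *	found = rds_mr_tree_walk(&rs->rs_rdma_keys, key, new_mr);
 */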

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, kref_read(&mr->r_kref));

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct kref *kref)
{
	struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);

	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
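		/* Drop the lock around the final put: rds_destroy_mr()
		 * above retakes rs_rdma_lock, so releasing the last
		 * reference while holding it would deadlock.
		 */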
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		kref_put(&mr->r_kref, __rds_put_mr_final);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages. Pins with FOLL_LONGTERM and
 * returns the number of pages pinned (always nr_pages) on success or a
 * negative errno; a partial pin is unwound and reported as -EFAULT, so
 * callers see all-or-nothing semantics.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret;

	if (write)
		gup_flags |= FOLL_WRITE;

	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
	if (ret >= 0 && ret < nr_pages) {
		unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	struct scatterlist *sg = NULL;
	unsigned int nr_pages;
	struct page **pages = NULL;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int need_odp = 0;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
	    PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
	    (args->vec.addr + args->vec.bytes)) {
		ret = -EINVAL;
		goto out;
	}

	if (!can_do_mlock()) {
		ret = -EPERM;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of the MR, irrespective of the underlying
	 * transport. To account for unaligned MR regions, subtract one
	 * from nr_pages.
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	kref_init(&mr->r_kref);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array. We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret == -EOPNOTSUPP) {
		need_odp = 1;
	} else if (ret <= 0) {
		goto out;
	} else {
		nents = ret;
		sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
		if (!sg) {
			ret = -ENOMEM;
			goto out;
		}
		WARN_ON(!nents);
		sg_init_table(sg, nents);

		/* Stick all pages into the scatterlist */
		for (i = 0; i < nents; i++)
			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

		rdsdebug("RDS: trans_private nents is %u\n", nents);
	}
	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(
		sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
		args->vec.addr, args->vec.bytes,
		need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);

	if (IS_ERR(trans_private)) {
		/* In ODP case, we don't GUP pages, so don't need
		 * to release anything.
		 */
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page-aligned regions. So we keep the offset, and build
	 * a 64-bit cookie containing <R_Key, offset> and pass that
	 * around.
	 */
	if (need_odp)
		cookie = rds_rdma_make_cookie(mr->r_key, 0);
	else
		cookie = rds_rdma_make_cookie(mr->r_key,
					      args->vec.addr & ~PAGE_MASK);
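
	/* Illustrative example, assuming the cookie layout of
	 * rds_rdma_make_cookie() in rds.h (R_Key in the low 32 bits,
	 * byte offset in the high 32 bits): r_key 0xabcd with offset
	 * 0x234 yields cookie 0x000002340000abcd, so the transfer lands
	 * at MR base + 0x234 (see rds_cmsg_rdma_args() below).
	 */
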
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		kref_get(&mr->r_kref);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		kref_put(&mr->r_kref, __rds_put_mr_final);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
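
/*
 * Illustrative userspace call (a sketch, not uAPI documentation; field
 * names per uapi/linux/rds.h, error handling omitted):
 *
 *	struct rds_get_mr_args margs = {
 *		.vec         = { .addr = (u64)buf, .bytes = len },
 *		.cookie_addr = (u64)&cookie,
 *		.flags       = RDS_RDMA_USE_ONCE,
 *	};
 *	setsockopt(fd, SOL_RDS, RDS_GET_MR, &margs, sizeof(margs));
 */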

int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval,
			      sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 * and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	kref_put(&mr->r_kref, __rds_put_mr_final);
	return 0;
}
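
/*
 * Illustrative userspace call (a sketch, assuming the RDS_FREE_MR
 * socket option from uapi/linux/rds.h):
 *
 *	struct rds_free_mr_args fargs = { .cookie = cookie, .flags = 0 };
 *	setsockopt(fd, SOL_RDS, RDS_FREE_MR, &fargs, sizeof(fargs));
 *
 * Passing a zero cookie instead asks the transport to flush all unused
 * MRs, per the special case above.
 */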

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	/* Get a reference so that the MR won't go away before calling
	 * sync_mr() below.
	 */
	kref_get(&mr->r_kref);

	/* If it is going to be freed, remove it from the tree now so
	 * that no other thread can find it and free it.
	 */
	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell.
	 */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* Release the reference held above. */
	kref_put(&mr->r_kref, __rds_put_mr_final);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me)
		kref_put(&mr->r_kref, __rds_put_mr_final);
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	if (ro->op_odp_mr) {
		kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
	} else {
		for (i = 0; i < ro->op_nents; i++) {
			struct page *page = sg_page(&ro->op_sg[i]);

			/* Mark page dirty if it was possibly modified, which
			 * is the case for an RDMA_READ which copies from remote
			 * to local memory.
			 */
			unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
		}
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
	ro->op_odp_mr = NULL;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for an RDMA_READ which copies from remote
	 * to local memory.
	 */
	unpin_user_pages_dirty_lock(&page, 1, true);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}

/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	if (args->nr_local > UIO_MAXIOV)
		return -EMSGSIZE;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {
		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}
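
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): two
 * page-aligned 8 KiB iovecs give tot_pages == 4, so the function
 * returns 4 * sizeof(struct scatterlist) as the extra space the caller
 * should reserve when sizing the rds_message.
 */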

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;
	bool odp_supported = true;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
	    rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}
	/* odp-mr is not supported for multiple requests within one message */
	if (args->nr_local != 1)
		odp_supported = false;

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	op->op_odp_mr = NULL;

	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (IS_ERR(op->op_sg)) {
		ret = PTR_ERR(op->op_sg);
		goto out_pages;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR).
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* no need to re-check; rds_rdma_pages() verified nr is nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if ((!odp_supported && ret <= 0) ||
		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
			goto out_pages;

		if (ret == -EOPNOTSUPP) {
			struct rds_mr *local_odp_mr;

			if (!rs->rs_transport->get_mr) {
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			local_odp_mr =
				kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
			if (!local_odp_mr) {
				ret = -ENOMEM;
				goto out_pages;
			}
			RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
			kref_init(&local_odp_mr->r_kref);
			local_odp_mr->r_trans = rs->rs_transport;
			local_odp_mr->r_sock = rs;
			local_odp_mr->r_trans_private =
				rs->rs_transport->get_mr(
					NULL, 0, rs, &local_odp_mr->r_key, NULL,
					iov->addr, iov->bytes, ODP_VIRTUAL);
			if (IS_ERR(local_odp_mr->r_trans_private)) {
				ret = PTR_ERR(local_odp_mr->r_trans_private);
				rdsdebug("get_mr ret %d %p\n", ret,
					 local_odp_mr->r_trans_private);
				kfree(local_odp_mr);
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
				 local_odp_mr, local_odp_mr->r_trans_private);
			op->op_odp_mr = local_odp_mr;
			op->op_odp_addr = iov->addr;
		}

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			sg_dma_len(sg) = sg->length;
			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}
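
		/* Worked example (illustrative, PAGE_SIZE == 4096): for
		 * iov->addr == 0x10ff0 and iov->bytes == 0x30, nr == 2
		 * and the loop above emits <pages[0], len 0x10, off
		 * 0xff0> then <pages[1], len 0x20, off 0>; the offset
		 * collapses to 0 once iov->addr becomes page-aligned.
		 */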
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) op->op_nents += nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (nr_bytes > args->remote_vec.bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) nr_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) (unsigned int) args->remote_vec.bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) goto out_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) op->op_bytes = nr_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) out_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) out_ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) rds_rdma_free_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) rds_stats_inc(s_send_rdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)  * The application wants us to pass an RDMA destination (aka MR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)  * to the remote peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct cmsghdr *cmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct rds_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) u32 r_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) rm->m_rdma_cookie != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /* We are reusing a previously mapped MR here. Most likely, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * application has written to the buffer, so we need to explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * flush those writes to RAM. Otherwise the HCA may not see them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * when doing a DMA from that buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) spin_lock_irqsave(&rs->rs_rdma_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (!mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) err = -EINVAL; /* invalid r_key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) kref_get(&mr->r_kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	if (mr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 		rm->rdma.op_rdma_mr = mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
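^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)  * Hedged userspace sketch (assumed names, not part of this file): the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)  * cookie passed down here is typically one the application obtained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)  * from RDS_GET_MR on this same socket, e.g.:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)  *	rds_rdma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)  *	struct rds_get_mr_args marg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)  *		.vec         = { .addr = (uint64_t)buf, .bytes = len },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)  *		.cookie_addr = (uint64_t)&cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)  *		.flags       = RDS_RDMA_USE_ONCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)  *	setsockopt(fd, SOL_RDS, RDS_GET_MR, &marg, sizeof(marg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)  *	// then sent to the peer in an RDS_CMSG_RDMA_DEST cmsg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)  */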
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * The application passes us an address range it wants to enable RDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * to/from. We map the area, and save the <R_Key,offset> pair
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * in rm->m_rdma_cookie. This causes it to be sent along to the peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * in an extension header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct cmsghdr *cmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) rm->m_rdma_cookie != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) &rm->rdma.op_rdma_mr, rm->m_conn_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
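^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)  * Hedged sketch (assumed names): RDS_CMSG_RDMA_MAP carries the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)  * struct rds_get_mr_args payload as the RDS_GET_MR setsockopt, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)  * mapping is created during sendmsg() itself, e.g.:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)  *	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)  *		char buf[CMSG_SPACE(sizeof(struct rds_get_mr_args))];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)  *		struct cmsghdr align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)  *	} u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)  *	struct cmsghdr *c = &u.align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)  *	c->cmsg_level = SOL_RDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)  *	c->cmsg_type  = RDS_CMSG_RDMA_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)  *	c->cmsg_len   = CMSG_LEN(sizeof(struct rds_get_mr_args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)  *	memcpy(CMSG_DATA(c), &marg, sizeof(marg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)  */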
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Fill in rds_message for an atomic request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct cmsghdr *cmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct rds_atomic_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	    rm->atomic.op_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) args = CMSG_DATA(cmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	/* Both non-masked and masked cmsg ops are converted to masked hw ops. */
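^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 	 * E.g. a plain FADD becomes a masked FADD with nocarry_mask == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	 * (carries may ripple through all 64 bits), and a plain CSWP a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 	 * masked CSWP with all-ones compare and swap masks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 	 */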
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) switch (cmsg->cmsg_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) case RDS_CMSG_ATOMIC_FADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) rm->atomic.op_m_fadd.add = args->fadd.add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) rm->atomic.op_m_fadd.nocarry_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) case RDS_CMSG_MASKED_ATOMIC_FADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) rm->atomic.op_m_fadd.add = args->m_fadd.add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) case RDS_CMSG_ATOMIC_CSWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) rm->atomic.op_m_cswp.compare = args->cswp.compare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) rm->atomic.op_m_cswp.swap = args->cswp.swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) rm->atomic.op_m_cswp.compare_mask = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) rm->atomic.op_m_cswp.swap_mask = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) case RDS_CMSG_MASKED_ATOMIC_CSWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) BUG(); /* should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) rm->atomic.op_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) rm->atomic.op_recverr = rs->rs_recverr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (IS_ERR(rm->atomic.op_sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ret = PTR_ERR(rm->atomic.op_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	/* verify the local address is 8-byte aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (args->local_addr & 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ret = rds_pin_pages(args->local_addr, 1, &page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (ret != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
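^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 	/* On completion, the value previously at remote_addr is written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 	 * back into these 8 bytes of the pinned user page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 	 */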
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (rm->atomic.op_notify || rm->atomic.op_recverr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 		/* We allocate the notifier up front, because we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 		 * want to allocate in the completion handler, where we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 		 * would have to use GFP_ATOMIC and cope with failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 		 * allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (!rm->atomic.op_notifier) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) rm->atomic.op_notifier->n_user_token = args->user_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) unpin_user_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) rm->atomic.op_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) kfree(rm->atomic.op_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
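^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)  * Hedged userspace sketch (fd, cookie and addresses assumed): a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)  * compare-and-swap against the peer's MR might be requested as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)  *	struct rds_atomic_args aarg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)  *		.cookie      = remote_cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)  *		.local_addr  = (uint64_t)&old_val,	// must be 8-byte aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)  *		.remote_addr = remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)  *		.cswp        = { .compare = expected, .swap = newval },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)  *		.flags       = RDS_RDMA_NOTIFY_ME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)  *		.user_token  = my_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)  *	// attached as an RDS_CMSG_ATOMIC_CSWP cmsg at level SOL_RDS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)  */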