/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
https://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#define BC_MAX_SLOTS	64U

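/*
 * Report the hard upper bound on the number of backchannel slots a
 * single transport will hand out; currently this is the fixed
 * BC_MAX_SLOTS regardless of the transport passed in.
 */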
unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
{
	return BC_MAX_SLOTS;
}

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < xprt->bc_alloc_max;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

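/*
 * Back an xdr_buf with a single freshly allocated page. Both the send
 * and receive buffers of a preallocated backchannel request are set up
 * this way, so each callback message is limited to PAGE_SIZE bytes.
 */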
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one XDR receive buffer */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
	return 0;
}

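/*
 * Allocate a single backchannel rpc_rqst together with its one-page
 * send and receive XDR buffers. On any failure, everything allocated
 * so far is released via xprt_free_allocation() and NULL is returned.
 */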
static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel. This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt. The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt. Any one of these resources may be used by an
 * incoming callback request. It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large. For example, a pNFS server
 * may use multiple deviceids. The list can be unbounded, but the
 * client has the ability to tell the server the maximum size of the
 * callback requests. Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs. The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

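/*
 * Illustrative sketch (not from any in-tree caller) of how a
 * session-setup path might preallocate backchannel slots before
 * accepting callbacks; the slot count and the xprt variable here are
 * hypothetical:
 *
 *	err = xprt_setup_backchannel(xprt, 16);
 *	if (err < 0)
 *		goto out_no_backchannel;
 *
 * A transport without a ->bc_setup op simply reports success above,
 * and -ENOMEM is returned when preallocation fails.
 */

/*
 * Generic, list-based bc_setup implementation: preallocate min_reqs
 * request/buffer pairs (capped at BC_MAX_SLOTS) and splice them onto
 * xprt->bc_pa_list under bc_pa_lock.
 */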
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	if (min_reqs > BC_MAX_SLOTS)
		min_reqs = BC_MAX_SLOTS;

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers. Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct. This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct. It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt->bc_alloc_count += min_reqs;
	xprt->bc_alloc_max += min_reqs;
	atomic_add(min_reqs, &xprt->bc_slot_count);
	spin_unlock(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

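/*
 * List-based bc_destroy implementation: lower the transport's
 * allocation ceiling and free up to max_reqs unused entries from
 * bc_pa_list. Entries currently in use are freed later, when
 * xprt_free_bc_rqst() finds that they are no longer wanted.
 */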
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		xprt->bc_alloc_count--;
		atomic_dec(&xprt->bc_slot_count);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

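/*
 * Pull a preallocated request off bc_pa_list and prime it for the
 * incoming callback identified by @xid. If the list is empty, the
 * caller-supplied spare @new (if any) is added first, provided the
 * slot table has not already grown to BC_MAX_SLOTS. Called with
 * bc_pa_lock held; returns NULL when no request can be produced.
 */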
static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
		struct rpc_rqst *new)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (list_empty(&xprt->bc_pa_list)) {
		if (!new)
			goto not_found;
		if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
			goto not_found;
		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		atomic_inc(&xprt->bc_slot_count);
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
	dprintk("RPC: backchannel req=%p\n", req);
not_found:
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}

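/*
 * bc_free_rqst implementation used together with the list-based
 * preallocation above. Mark the request unused and, if the transport
 * still wants spare preallocations, put it back on bc_pa_list;
 * otherwise free it outright. Drops the transport reference taken
 * when the callback was queued in xprt_complete_bc_request().
 */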
void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		atomic_inc(&xprt->bc_slot_count);
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use. Free the entry and don't attempt
		 * to add back to the list because there is no need to
		 * have any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
	}
	xprt_put(xprt);
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup. Buffer space for the send and private XDR buffers
 * has been preallocated as well. Use xprt_lookup_bc_request() below to
 * obtain one of these structures for an incoming callback, and
 * xprt_free_bc_request() to return it.
 *
 * We are called in soft interrupt context, so the plain spin_lock is
 * sufficient; there is no need to take the bottom-half variant.
 *
 * Returns an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req, *new = NULL;

	do {
		spin_lock(&xprt->bc_pa_lock);
		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
			if (req->rq_connect_cookie != xprt->connect_cookie)
				continue;
			if (req->rq_xid == xid)
				goto found;
		}
		req = xprt_get_bc_request(xprt, xid, new);
found:
		spin_unlock(&xprt->bc_pa_lock);
		if (new) {
			if (req != new)
				xprt_free_allocation(new);
			break;
		} else if (req)
			break;
		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
	} while (new);
	return req;
}

/*
 * Add callback request to callback list. The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests. Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt->bc_alloc_count--;
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	xprt_get(xprt);
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}