Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  *   fs/cifs/transport.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *   Copyright (C) International Business Machines  Corp., 2002,2008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *   Author(s): Steve French (sfrench@us.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *   Jeremy Allison (jra@samba.org) 2006.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *   This library is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *   it under the terms of the GNU Lesser General Public License as published
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *   by the Free Software Foundation; either version 2.1 of the License, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *   (at your option) any later version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *   This library is distributed in the hope that it will be useful,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *   the GNU Lesser General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *   You should have received a copy of the GNU Lesser General Public License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *   along with this library; if not, write to the Free Software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/freezer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/bvec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/mempool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include "cifspdu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include "cifsglob.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include "cifsproto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include "cifs_debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include "smb2proto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include "smbdirect.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) /* Max number of iovectors we can use off the stack when sending requests. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #define CIFS_MAX_IOV_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) cifs_wake_up_task(struct mid_q_entry *mid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	wake_up_process(mid->callback_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) struct mid_q_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	struct mid_q_entry *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	if (server == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	memset(temp, 0, sizeof(struct mid_q_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	kref_init(&temp->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	temp->mid = get_mid(smb_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	temp->pid = current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	temp->command = cpu_to_le16(smb_buffer->Command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	/* when mid allocated can be before when sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	temp->when_alloc = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	temp->server = server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	 * The default is for the mid to be synchronous, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	 * default callback just wakes up the current task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	get_task_struct(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	temp->creator = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	temp->callback = cifs_wake_up_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	temp->callback_data = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	atomic_inc(&midCount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	temp->mid_state = MID_REQUEST_ALLOCATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	return temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
/*
 * Final kref release for a mid: let the server ops reap a cancelled-but-
 * answered mid, free the response buffer, record latency statistics
 * (CONFIG_CIFS_STATS2 only), drop the creator task reference, and return
 * the entry to the mempool.
 *
 * NOTE(review): this runs via kref_put() under GlobalMid_Lock (see
 * cifs_mid_q_entry_release) — confirm all callees here, including
 * ops->handle_cancelled_mid, are safe in atomic context.
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd: the (possibly blocking) lock command for this dialect;
	 * slow responses to it are expected and excluded from slow-rsp logs */
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/* A response arrived after the waiter gave up: give the server ops a
	 * chance to clean up (e.g. close a handle opened by the response). */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	/* large_buf selects which buffer pool resp_buf came from */
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* when_alloc in the future implies a corrupted or re-used entry */
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* Per-command min/max/total latency counters (SMB2+ commands only) */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			/* first sample for this command seeds both extremes */
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			/* A/S/R: jiffies since alloc / send / receive */
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* drop the task reference taken by AllocMidQEntry */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 
/*
 * Drop one reference on @midEntry; the last reference frees it via
 * _cifs_mid_q_entry_release().  The kref_put is done under GlobalMid_Lock,
 * presumably to serialize the final release against concurrent mid lookups
 * on the pending-request list — NOTE(review): confirm the release callees
 * are safe to run in atomic context.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
/*
 * Legacy-named wrapper kept for existing callers: simply drops the
 * caller's reference on @midEntry (freeing it if that was the last one).
 */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) cifs_delete_mid(struct mid_q_entry *mid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	if (!(mid->mid_flags & MID_DELETED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 		list_del_init(&mid->qhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		mid->mid_flags |= MID_DELETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	DeleteMidQEntry(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194)  * smb_send_kvec - send an array of kvecs to the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  * @server:	Server to send the data to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196)  * @smb_msg:	Message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197)  * @sent:	amount of data sent on socket is stored here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199)  * Our basic "send data to server" function. Should be called with srv_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200)  * held. The caller is responsible for handling the results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	      size_t *sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	int retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	struct socket *ssocket = server->ssocket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	*sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	smb_msg->msg_namelen = sizeof(struct sockaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	smb_msg->msg_control = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	smb_msg->msg_controllen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	if (server->noblocksnd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 		smb_msg->msg_flags = MSG_NOSIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	while (msg_data_left(smb_msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 		 * If blocking send, we try 3 times, since each can block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 		 * for 5 seconds. For nonblocking  we have to try more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 		 * but wait increasing amounts of time allowing time for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		 * socket to clear.  The overall time we wait in either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		 * case to send on the socket is about 15 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		 * Similarly we wait for 15 seconds for a response from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 		 * the server in SendReceive[2] for the server to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 		 * a response back for most types of requests (except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 		 * SMB Write past end of file which can be slow, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		 * blocking lock operations). NFS waits slightly longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		 * than CIFS, but this can make it take longer for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 		 * nonresponsive servers to be detected and 15 seconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		 * is more than enough time for modern networks to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 		 * send a packet.  In most cases if we fail to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		 * after the retries we will kill the socket and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 		 * reconnect which may clear the network problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 		rc = sock_sendmsg(ssocket, smb_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		if (rc == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 			retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 			if (retries >= 14 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 			    (!server->noblocksnd && (retries > 2))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 					 ssocket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 				return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 			msleep(1 << retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 		if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 			/* should never happen, letting socket clear before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 			   retrying is our only obvious option here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 			cifs_server_dbg(VFS, "tcp sent no data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 			msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 		/* send was at least partially successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		*sent += rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		retries = 0; /* in case we get ENOSPC on the next send */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	struct kvec *iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	int nvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	unsigned long buflen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	if (server->vals->header_preamble_size == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		iov = &rqst->rq_iov[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		nvec = rqst->rq_nvec - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		iov = rqst->rq_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		nvec = rqst->rq_nvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	/* total up iov array first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	for (i = 0; i < nvec; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		buflen += iov[i].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	 * Add in the page array if there is one. The caller needs to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	 * PAGE_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	if (rqst->rq_npages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		if (rqst->rq_npages == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 			buflen += rqst->rq_tailsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 			 * If there is more than one page, calculate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 			 * buffer length based on rq_offset and rq_tailsz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 					rqst->rq_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 			buflen += rqst->rq_tailsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	return buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		struct smb_rqst *rqst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	struct kvec *iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	int n_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	unsigned int send_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	sigset_t mask, oldmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	size_t total_len = 0, sent, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	struct socket *ssocket = server->ssocket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	struct msghdr smb_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	__be32 rfc1002_marker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	if (cifs_rdma_enabled(server)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 		/* return -EAGAIN when connecting or reconnecting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 		rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		if (server->smbd_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 			rc = smbd_send(server, num_rqst, rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		goto smbd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	if (ssocket == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 		cifs_dbg(FYI, "signal pending before send request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	/* cork the socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	tcp_sock_set_cork(ssocket->sk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	for (j = 0; j < num_rqst; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		send_length += smb_rqst_len(server, &rqst[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	rfc1002_marker = cpu_to_be32(send_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	 * We should not allow signals to interrupt the network send because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	 * any partial send will cause session reconnects thus increasing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	 * latency of system calls and overload a server with unnecessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	 * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	sigfillset(&mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	sigprocmask(SIG_BLOCK, &mask, &oldmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	/* Generate a rfc1002 marker for SMB2+ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	if (server->vals->header_preamble_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		struct kvec hiov = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 			.iov_base = &rfc1002_marker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 			.iov_len  = 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		rc = smb_send_kvec(server, &smb_msg, &sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 			goto unmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		total_len += sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 		send_length += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	for (j = 0; j < num_rqst; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		iov = rqst[j].rq_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		n_vec = rqst[j].rq_nvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		for (i = 0; i < n_vec; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 			dump_smb(iov[i].iov_base, iov[i].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 			size += iov[i].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		rc = smb_send_kvec(server, &smb_msg, &sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 			goto unmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		total_len += sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		/* now walk the page array and send each page in it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		for (i = 0; i < rqst[j].rq_npages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 			struct bio_vec bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 			bvec.bv_page = rqst[j].rq_pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 					     &bvec.bv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 				      &bvec, 1, bvec.bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 			rc = smb_send_kvec(server, &smb_msg, &sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 			if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 			total_len += sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) unmask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	sigprocmask(SIG_SETMASK, &oldmask, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	 * If signal is pending but we have already sent the whole packet to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	 * the server we need to return success status to allow a corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	 * mid entry to be kept in the pending requests queue thus allowing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	 * to handle responses from the server by the client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	 * If only part of the packet has been sent there is no need to hide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	 * interrupt because the session will be reconnected anyway, so there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	 * won't be any response from the server to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	if (signal_pending(current) && (total_len != send_length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		cifs_dbg(FYI, "signal is pending after attempt to send\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		rc = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	/* uncork it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	tcp_sock_set_cork(ssocket->sk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	if ((total_len > 0) && (total_len != send_length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			 send_length, total_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		 * If we have only sent part of an SMB then the next SMB could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		 * be taken as the remainder of this one. We need to kill the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		 * socket so the server throws away the partial SMB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		server->tcpStatus = CifsNeedReconnect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		trace_smb3_partial_send_reconnect(server->CurrentMid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 						  server->hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) smbd_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	if (rc < 0 && rc != -EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 			 rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	else if (rc > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	      struct smb_rqst *rqst, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	struct kvec iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	struct smb2_transform_hdr *tr_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	struct smb_rqst cur_rqst[MAX_COMPOUND];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (!(flags & CIFS_TRANSFORM_REQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		return __smb_send_rqst(server, num_rqst, rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	if (num_rqst > MAX_COMPOUND - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	if (!server->ops->init_transform_rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (!tr_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	memset(&iov, 0, sizeof(iov));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	memset(tr_hdr, 0, sizeof(*tr_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	iov.iov_base = tr_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	iov.iov_len = sizeof(*tr_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	cur_rqst[0].rq_iov = &iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	cur_rqst[0].rq_nvec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	rc = server->ops->init_transform_rq(server, num_rqst + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 					    &cur_rqst[0], rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	kfree(tr_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	 unsigned int smb_buf_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	struct kvec iov[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	struct smb_rqst rqst = { .rq_iov = iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 				 .rq_nvec = 2 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	iov[0].iov_base = smb_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	iov[0].iov_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	iov[1].iov_base = (char *)smb_buffer + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	iov[1].iov_len = smb_buf_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	return __smb_send_rqst(server, 1, &rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		      const int timeout, const int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		      unsigned int *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	int *credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	int optype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	long int t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (timeout < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		t = MAX_JIFFY_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		t = msecs_to_jiffies(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	optype = flags & CIFS_OP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	*instance = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	credits = server->ops->get_credits_field(server, optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	/* Since an echo is already inflight, no need to wait to send another */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	if (*credits <= 0 && optype == CIFS_ECHO_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	spin_lock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		/* oplock breaks must not be held up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		server->in_flight++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		if (server->in_flight > server->max_in_flight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 			server->max_in_flight = server->in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 		*credits -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		*instance = server->reconnect_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		spin_unlock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		if (*credits < num_credits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 			spin_unlock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 			cifs_num_waiters_inc(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 			rc = wait_event_killable_timeout(server->request_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 				has_credits(server, credits, num_credits), t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			cifs_num_waiters_dec(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 			if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 				trace_smb3_credit_timeout(server->CurrentMid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 					server->hostname, num_credits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 					 timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 				return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 			if (rc == -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 				return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 			spin_lock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			if (server->tcpStatus == CifsExiting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 				spin_unlock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 				return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 			 * For normal commands, reserve the last MAX_COMPOUND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 			 * credits to compound requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 			 * Otherwise these compounds could be permanently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 			 * starved for credits by single-credit requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 			 * To prevent spinning CPU, block this thread until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 			 * there are >MAX_COMPOUND credits available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 			 * But only do this is we already have a lot of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 			 * credits in flight to avoid triggering this check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 			 * for servers that are slow to hand out credits on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 			 * new sessions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 			if (!optype && num_credits == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 			    server->in_flight > 2 * MAX_COMPOUND &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 			    *credits <= MAX_COMPOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 				spin_unlock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 				cifs_num_waiters_inc(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 				rc = wait_event_killable_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 					server->request_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 					has_credits(server, credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 						    MAX_COMPOUND + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 					t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 				cifs_num_waiters_dec(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 				if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 					trace_smb3_credit_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 						server->CurrentMid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 						server->hostname, num_credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 						0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 						 timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 					return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 				if (rc == -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 					return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 				spin_lock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 			 * Can not count locking commands against total
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 			 * as they are allowed to block on server.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			/* update # of requests on the wire to server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 				*credits -= num_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 				server->in_flight += num_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 				if (server->in_flight > server->max_in_flight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 					server->max_in_flight = server->in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 				*instance = server->reconnect_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			spin_unlock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) wait_for_free_request(struct TCP_Server_Info *server, const int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		      unsigned int *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	return wait_for_free_credits(server, 1, -1, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 				     instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) wait_for_compound_request(struct TCP_Server_Info *server, int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 			  const int flags, unsigned int *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	int *credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	spin_lock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (*credits < num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		 * If the server is tight on resources or just gives us less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		 * credits for other reasons (e.g. requests are coming out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		 * order and the server delays granting more credits until it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		 * processes a missing mid) and we exhausted most available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		 * credits there may be situations when we try to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		 * a compound request but we don't have enough credits. At this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		 * point the client needs to decide if it should wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		 * additional credits or fail the request. If at least one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		 * request is in flight there is a high probability that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		 * server will return enough credits to satisfy this compound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		 * request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		 * Return immediately if no requests in flight since we will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		 * stuck on waiting for credits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		if (server->in_flight == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			spin_unlock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 			return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	spin_unlock(&server->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	return wait_for_free_credits(server, num, 60000, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 				     instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		      unsigned int *num, struct cifs_credits *credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	*num = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	credits->value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	credits->instance = server->reconnect_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			struct mid_q_entry **ppmidQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	if (ses->server->tcpStatus == CifsExiting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	if (ses->server->tcpStatus == CifsNeedReconnect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (ses->status == CifsNew) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			(in_buf->Command != SMB_COM_NEGOTIATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		/* else ok - we are setting up session */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if (ses->status == CifsExiting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		/* check if SMB session is bad because we are setting it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		/* else ok - we are shutting down session */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (*ppmidQ == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	error = wait_event_freezekillable_unsafe(server->response_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 				    midQ->mid_state != MID_REQUEST_SUBMITTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) struct mid_q_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	struct mid_q_entry *mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (rqst->rq_iov[0].iov_len != 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		return ERR_PTR(-EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	/* enable signing if server requires it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if (server->sign)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	mid = AllocMidQEntry(hdr, server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (mid == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		DeleteMidQEntry(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	return mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  * Send a SMB request and set the callback function in the mid to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  * the result. Caller is responsible for dealing with timeouts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		mid_receive_t *receive, mid_callback_t *callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		mid_handle_t *handle, void *cbdata, const int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		const struct cifs_credits *exist_credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	struct mid_q_entry *mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	struct cifs_credits credits = { .value = 0, .instance = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	unsigned int instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	int optype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	optype = flags & CIFS_OP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if ((flags & CIFS_HAS_CREDITS) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		rc = wait_for_free_request(server, flags, &instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		credits.value = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		credits.instance = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		instance = exist_credits->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	mutex_lock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	 * We can't use credits obtained from the previous session to send this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	 * request. Check if there were reconnects after we obtained credits and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * return -EAGAIN in such cases to let callers handle it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (instance != server->reconnect_instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		add_credits_and_wake_if(server, &credits, optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	mid = server->ops->setup_async_request(server, rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (IS_ERR(mid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		add_credits_and_wake_if(server, &credits, optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		return PTR_ERR(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	mid->receive = receive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	mid->callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	mid->callback_data = cbdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	mid->handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	mid->mid_state = MID_REQUEST_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	/* put it on the pending_mid_q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	list_add_tail(&mid->qhead, &server->pending_mid_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 * Need to store the time in mid before calling I/O. For call_async,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 * I/O response may come back and free the mid entry on another thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	cifs_save_when_sent(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	cifs_in_send_inc(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	rc = smb_send_rqst(server, 1, rqst, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	cifs_in_send_dec(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		revert_current_mid(server, mid->credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		server->sequence_number -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		cifs_delete_mid(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	add_credits_and_wake_if(server, &credits, optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  * Send an SMB Request.  No response info (other than return code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855)  * needs to be parsed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * flags indicate the type of request buffer and how long to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * and whether to log NT STATUS code (error) before mapping it to POSIX error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		 char *in_buf, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	struct kvec iov[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	struct kvec rsp_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	int resp_buf_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	iov[0].iov_base = in_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	flags |= CIFS_NO_RSP_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	switch (mid->mid_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	case MID_RESPONSE_RECEIVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	case MID_RETRY_NEEDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	case MID_RESPONSE_MALFORMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	case MID_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		rc = -EHOSTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		if (!(mid->mid_flags & MID_DELETED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			list_del_init(&mid->qhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			mid->mid_flags |= MID_DELETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			 __func__, mid->mid, mid->mid_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	DeleteMidQEntry(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	    struct mid_q_entry *mid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	return server->ops->send_cancel ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 				server->ops->send_cancel(server, rqst, mid) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		   bool log_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	dump_smb(mid->resp_buf, min_t(u32, 92, len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	/* convert the length into a more usable form */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if (server->sign) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		struct kvec iov[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		struct smb_rqst rqst = { .rq_iov = iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 					 .rq_nvec = 2 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		iov[0].iov_base = mid->resp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		iov[0].iov_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		iov[1].iov_base = (char *)mid->resp_buf + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		iov[1].iov_len = len - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		/* FIXME: add code to kill session */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		rc = cifs_verify_signature(&rqst, server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 					   mid->sequence_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				 rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	/* BB special case reconnect tid and uid here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	return map_and_check_smb_error(mid, log_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) struct mid_q_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		   struct smb_rqst *rqst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	struct mid_q_entry *mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (rqst->rq_iov[0].iov_len != 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		return ERR_PTR(-EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	rc = allocate_mid(ses, hdr, &mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		cifs_delete_mid(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	return mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) cifs_compound_callback(struct mid_q_entry *mid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct TCP_Server_Info *server = mid->server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct cifs_credits credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	credits.value = server->ops->get_credits(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	credits.instance = server->reconnect_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	add_credits(server, &credits, mid->optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) cifs_compound_last_callback(struct mid_q_entry *mid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	cifs_compound_callback(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	cifs_wake_up_task(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) cifs_cancelled_callback(struct mid_q_entry *mid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	cifs_compound_callback(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	DeleteMidQEntry(mid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * Return a channel (master if none) of @ses that can be used to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * regular requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * If we are currently binding a new channel (negprot/sess.setup),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  * return the new incomplete channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	uint index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	if (!ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (!ses->binding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		/* round robin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		if (ses->chan_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			index = (uint)atomic_inc_return(&ses->chan_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			index %= ses->chan_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		return ses->chans[index].server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		return cifs_ses_server(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		   struct TCP_Server_Info *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		   const int flags, const int num_rqst, struct smb_rqst *rqst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		   int *resp_buf_type, struct kvec *resp_iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	int i, j, optype, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	struct mid_q_entry *midQ[MAX_COMPOUND];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	bool cancelled_mid[MAX_COMPOUND] = {false};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct cifs_credits credits[MAX_COMPOUND] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		{ .value = 0, .instance = 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	unsigned int instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	optype = flags & CIFS_OP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	for (i = 0; i < num_rqst; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (!ses || !ses->server || !server) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		cifs_dbg(VFS, "Null session\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	if (server->tcpStatus == CifsExiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	 * Wait for all the requests to become available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	 * This approach still leaves the possibility to be stuck waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	 * credits if the server doesn't grant credits to the outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	 * requests and if the client is completely idle, not generating any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	 * other requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	 * This can be handled by the eventual session reconnect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	rc = wait_for_compound_request(server, num_rqst, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				       &instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	for (i = 0; i < num_rqst; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		credits[i].value = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		credits[i].instance = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	 * Make sure that we sign in the same order that we send on this socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	 * and avoid races inside tcp sendmsg code that could cause corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	 * of smb data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	mutex_lock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 * All the parts of the compound chain belong obtained credits from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	 * same session. We can not use credits obtained from the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	 * session to send this request. Check if there were reconnects after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	 * we obtained credits and return -EAGAIN in such cases to let callers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	 * handle it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (instance != server->reconnect_instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		for (j = 0; j < num_rqst; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			add_credits(server, &credits[j], optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	for (i = 0; i < num_rqst; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		if (IS_ERR(midQ[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			revert_current_mid(server, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				cifs_delete_mid(midQ[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			/* Update # of requests on wire to server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			for (j = 0; j < num_rqst; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 				add_credits(server, &credits[j], optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			return PTR_ERR(midQ[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		midQ[i]->optype = optype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		 * Invoke callback for every part of the compound chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		 * to calculate credits properly. Wake up this thread only when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		 * the last element is received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (i < num_rqst - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			midQ[i]->callback = cifs_compound_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			midQ[i]->callback = cifs_compound_last_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	cifs_in_send_inc(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	rc = smb_send_rqst(server, num_rqst, rqst, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	cifs_in_send_dec(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	for (i = 0; i < num_rqst; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		cifs_save_when_sent(midQ[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		revert_current_mid(server, num_rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		server->sequence_number -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	 * If sending failed for some reason or it is an oplock break that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 * will not receive a response to - return credits back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		for (i = 0; i < num_rqst; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			add_credits(server, &credits[i], optype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	 * At this point the request is passed to the network stack - we assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	 * that any credits taken from the server structure on the client have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	 * been spent and we can't return them back. Once we receive responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	 * we will collect credits granted by the server in the mid callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	 * and add those credits to the server structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	 * Compounding is never used during session establish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		mutex_lock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 					   rqst[0].rq_nvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	for (i = 0; i < num_rqst; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		rc = wait_for_response(server, midQ[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		for (; i < num_rqst; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			send_cancel(server, &rqst[i], midQ[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 				midQ[i]->callback = cifs_cancelled_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				cancelled_mid[i] = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				credits[i].value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	for (i = 0; i < num_rqst; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		rc = cifs_sync_mid_result(midQ[i], server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			/* mark this mid as cancelled to not free it below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			cancelled_mid[i] = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (!midQ[i]->resp_buf ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			cifs_dbg(FYI, "Bad MID state?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		buf = (char *)midQ[i]->resp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		resp_iov[i].iov_base = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			server->vals->header_preamble_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		if (midQ[i]->large_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			resp_buf_type[i] = CIFS_LARGE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			resp_buf_type[i] = CIFS_SMALL_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		rc = server->ops->check_receive(midQ[i], server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 						     flags & CIFS_LOG_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		/* mark it so buf will not be freed by cifs_delete_mid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		if ((flags & CIFS_NO_RSP_BUF) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			midQ[i]->resp_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	 * Compounding is never used during session establish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		struct kvec iov = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 			.iov_base = resp_iov[0].iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			.iov_len = resp_iov[0].iov_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		mutex_lock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		smb311_update_preauth_hash(ses, &iov, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * This will dequeue all mids. After this it is important that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 * demultiplex_thread will not process any of these mids any futher.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 * This is prevented above by using a noop callback that will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	 * wake this thread except for the very last PDU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	for (i = 0; i < num_rqst; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		if (!cancelled_mid[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			cifs_delete_mid(midQ[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	       struct TCP_Server_Info *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	       struct kvec *resp_iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	return compound_send_recv(xid, ses, server, flags, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 				  rqst, resp_buf_type, resp_iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) SendReceive2(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	     const int flags, struct kvec *resp_iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct smb_rqst rqst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		if (!new_iov) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			/* otherwise cifs_send_recv below sets resp_buf_type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			*resp_buf_type = CIFS_NO_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		new_iov = s_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	/* 1st iov is a RFC1001 length followed by the rest of the packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	new_iov[0].iov_base = new_iov[1].iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	new_iov[0].iov_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	new_iov[1].iov_base += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	new_iov[1].iov_len -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	memset(&rqst, 0, sizeof(struct smb_rqst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	rqst.rq_iov = new_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	rqst.rq_nvec = n_vec + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	rc = cifs_send_recv(xid, ses, ses->server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			    &rqst, resp_buf_type, flags, resp_iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		kfree(new_iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) SendReceive(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	    int *pbytes_returned, const int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	struct mid_q_entry *midQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	struct cifs_credits credits = { .value = 1, .instance = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	struct TCP_Server_Info *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (ses == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		cifs_dbg(VFS, "Null smb session\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	server = ses->server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	if (server == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		cifs_dbg(VFS, "Null tcp session\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (server->tcpStatus == CifsExiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	/* Ensure that we do not send more than 50 overlapping requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	   to the same server. We may make this configurable later or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	   use ses->maxReq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	rc = wait_for_free_request(server, flags, &credits.instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	/* make sure that we sign in the same order that we send on this socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	   and avoid races inside tcp sendmsg code that could cause corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	   of smb data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	mutex_lock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	rc = allocate_mid(ses, in_buf, &midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		/* Update # of requests on wire to server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		add_credits(server, &credits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	midQ->mid_state = MID_REQUEST_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	cifs_in_send_inc(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	rc = smb_send(server, in_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	cifs_in_send_dec(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	cifs_save_when_sent(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		server->sequence_number -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	rc = wait_for_response(server, midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		send_cancel(server, &rqst, midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			/* no longer considered to be "in-flight" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			midQ->callback = DeleteMidQEntry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			add_credits(server, &credits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	rc = cifs_sync_mid_result(midQ, server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		add_credits(server, &credits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	if (!midQ->resp_buf || !out_buf ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		cifs_server_dbg(VFS, "Bad MID state?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	rc = cifs_check_receive(midQ, server, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	cifs_delete_mid(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	add_credits(server, &credits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)    blocking lock to return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			struct smb_hdr *in_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			struct smb_hdr *out_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	int bytes_returned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	struct cifs_ses *ses = tcon->ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	/* We just modify the current in_buf to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	   the type of lock from LOCKING_ANDX_SHARED_LOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	   LOCKING_ANDX_CANCEL_LOCK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	pSMB->Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	pSMB->hdr.Mid = get_next_mid(ses->server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	return SendReceive(xid, ses, in_buf, out_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			&bytes_returned, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	    int *pbytes_returned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	int rstart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	struct mid_q_entry *midQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	struct cifs_ses *ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	unsigned int instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	struct TCP_Server_Info *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (tcon == NULL || tcon->ses == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		cifs_dbg(VFS, "Null smb session\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	ses = tcon->ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	server = ses->server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	if (server == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		cifs_dbg(VFS, "Null tcp session\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (server->tcpStatus == CifsExiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	/* Ensure that we do not send more than 50 overlapping requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	   to the same server. We may make this configurable later or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	   use ses->maxReq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			      len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	/* make sure that we sign in the same order that we send on this socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	   and avoid races inside tcp sendmsg code that could cause corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	   of smb data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	mutex_lock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	rc = allocate_mid(ses, in_buf, &midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		cifs_delete_mid(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	midQ->mid_state = MID_REQUEST_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	cifs_in_send_inc(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	rc = smb_send(server, in_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	cifs_in_send_dec(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	cifs_save_when_sent(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		server->sequence_number -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	mutex_unlock(&server->srv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		cifs_delete_mid(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	/* Wait for a reply - allow signals to interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	rc = wait_event_interruptible(server->response_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		((server->tcpStatus != CifsGood) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		 (server->tcpStatus != CifsNew)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	/* Were we interrupted by a signal ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	if ((rc == -ERESTARTSYS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		((server->tcpStatus == CifsGood) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		 (server->tcpStatus == CifsNew))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		if (in_buf->Command == SMB_COM_TRANSACTION2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			/* POSIX lock. We send a NT_CANCEL SMB to cause the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			   blocking lock to return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			rc = send_cancel(server, &rqst, midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 				cifs_delete_mid(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 				return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			   to cause the blocking lock to return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			/* If we get -ENOLCK back the lock may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			   already been removed. Don't exit in this case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			if (rc && rc != -ENOLCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 				cifs_delete_mid(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 				return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		rc = wait_for_response(server, midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			send_cancel(server, &rqst, midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 				/* no longer considered to be "in-flight" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 				midQ->callback = DeleteMidQEntry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 				spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 				return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		/* We got the response - restart system call. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		rstart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	rc = cifs_sync_mid_result(midQ, server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	/* rcvd frame is ok */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		cifs_tcon_dbg(VFS, "Bad MID state?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	rc = cifs_check_receive(midQ, server, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	cifs_delete_mid(midQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	if (rstart && rc == -EACCES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }