/*
 * fs/cifs/misc.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#endif

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/*
 * The xid serves as a useful identifier for each incoming vfs request,
 * much as the mid identifies each smb sent on the wire. GlobalCurrentXid
 * also provides a running counter (which will eventually wrap past zero)
 * of the total vfs operations handled since the cifs fs was mounted.
 */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

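/*
 * Paired with _get_xid(): drop the count of active vfs operations under
 * GlobalMid_Lock. Callers normally reach these through the get_xid()/free_xid()
 * wrappers rather than calling them directly.
 */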
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

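/*
 * Allocate and minimally initialize a new SMB session structure. The caller
 * owns the initial reference (ses_count is set to 1). Returns NULL on
 * allocation failure.
 */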
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
	}
	return ret_buf;
}

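/*
 * Free a session structure and the strings hanging off of it. The password
 * and the auth key are freed with kfree_sensitive() so the secrets are
 * zeroed before the memory is returned.
 */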
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree_sensitive(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
	kfree_sensitive(buf_to_free);
}

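/*
 * Allocate a tree connection structure together with the fid used for the
 * cached root handle (crfid.fid). Returns NULL if either allocation fails.
 */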
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
	if (!ret_buf->crfid.fid) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->tidStatus = CifsNew;
	++ret_buf->tc_count;
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	mutex_init(&ret_buf->crfid.fid_mutex);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);

	return ret_buf;
}

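/*
 * Free a tree connection allocated by tconInfoAlloc(), including the cached
 * root fid and (when DFS is enabled) the stored DFS path. The share
 * password, if any, is cleared with kfree_sensitive().
 */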
void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
#ifdef CONFIG_CIFS_DFS_UPCALL
	kfree(buf_to_free->dfs_path);
#endif
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * The SMB2 header is bigger than the CIFS one, so it does no harm
	 * to clear a few extra bytes when the buffer is used for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use the negotiated size instead of max_msgsize, but it
	 * may be more efficient to always allocate the same size: it is only
	 * slightly larger than necessary, and the maximum buffer size
	 * defaults to this value and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

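/*
 * Return a large request buffer obtained from cifs_buf_get() to the
 * cifs_req_poolp mempool. A NULL pointer is silently ignored.
 */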
void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/*
	 * We could use the negotiated size instead of max_msgsize, but it
	 * may be more efficient to always allocate the same size: it is only
	 * slightly larger than necessary, and the maximum buffer size
	 * defaults to this value and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

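/*
 * Return a small request buffer obtained from cifs_small_buf_get() to the
 * cifs_sm_req_poolp mempool.
 */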
void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

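/*
 * Release a response buffer according to its type. Any other buffer type
 * (e.g. when no response buffer was allocated) is left untouched.
 */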
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: MID can not be set if treeCon not passed in; in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

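/*
 * Basic sanity checks on a received SMB header: the frame must carry the
 * 0xFF 'S' 'M' 'B' signature and must be a response (the only request a
 * server legitimately sends us is an oplock break via LOCKING_ANDX).
 * Returns 0 if the header looks valid, 1 otherwise.
 */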
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

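/*
 * Validate the length fields of a received SMB1 frame: the amount of data
 * actually read, the RFC1001 length in the header, and the length calculated
 * from the SMB's own fields (via smbCalcSize()) must all be consistent,
 * allowing for a few known server quirks. Returns 0 on success or -EIO for
 * a malformed frame.
 */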
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero);
				 * in particular this has been seen on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

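/*
 * Inspect a received frame and decide whether it is a server-initiated
 * message we handle here: a dnotify (change notify) response or an oplock
 * break. Returns true if the frame was recognized and consumed by this
 * function (including the case where no matching open file is found),
 * false if it should be processed as an ordinary response.
 */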
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	    (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ", buf,
				sizeof(struct smb_hdr) + 60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging an error on an invalid handle here - a
		   harmless race between a close request and the oplock break
		   response is expected from time to time when writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		    le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		    le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

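/*
 * Turn off the use of server-provided inode numbers for this mount and warn
 * the user; called when the numbers reported by the server turn out to be
 * unusable (for example because the files are spread across different
 * servers in a DFS namespace).
 */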
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->treeName : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");

	}
}

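/*
 * Translate an SMB1 oplock level into the internal CIFS_CACHE_* caching
 * flags stored in the inode: an exclusive oplock allows cached reads and
 * writes, a level II oplock allows cached reads only, anything else
 * disables caching.
 */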
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

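/*
 * Return true if the current task is allowed to use backup intent on this
 * mount, i.e. its fsuid matches the configured backupuid or it is a member
 * of the configured backupgid group.
 */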
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

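/*
 * The following helpers maintain the tcon's pending_opens list: each entry
 * records the lease key and tlink of an open that is still in progress.
 * Entries are added and removed under the tcon's open_file_lock.
 */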
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

/* parses a DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

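/*
 * Allocate a reference-counted context used by the read/write paths to
 * track an in-flight asynchronous request; it is freed through
 * cifs_aio_ctx_release() once the last reference is dropped.
 */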
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct cifs_aio_ctx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) cifs_aio_ctx_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) struct cifs_aio_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know whether we have to drop page references in
	 * cifs_aio_ctx_release()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) INIT_LIST_HEAD(&ctx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) mutex_init(&ctx->aio_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) init_completion(&ctx->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) kref_init(&ctx->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
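/*
 * kref release callback for struct cifs_aio_ctx.  Callers are expected to
 * drop their reference with kref_put(&ctx->refcount, cifs_aio_ctx_release)
 * rather than calling this directly.
 */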
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) cifs_aio_ctx_release(struct kref *refcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct cifs_aio_ctx *ctx = container_of(refcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct cifs_aio_ctx, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) cifsFileInfo_put(ctx->cfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
	 * which means that iov_iter_get_pages() succeeded and thus that we
	 * have taken a reference on the pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (ctx->bv) {
		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) for (i = 0; i < ctx->npages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (ctx->should_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) set_page_dirty(ctx->bv[i].bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) put_page(ctx->bv[i].bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) kvfree(ctx->bv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
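/*
 * setup_aio_ctx_iter - describe the memory behind @iter in @ctx so that an
 * async read or write can keep using it after the caller's iovec goes away.
 * kvec-based iterators are simply copied; for user-backed iterators the
 * pages are pinned via iov_iter_get_pages() and wrapped in a bvec-based
 * iterator.
 */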
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) unsigned int cur_npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) unsigned int npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) size_t count = iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) unsigned int saved_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) size_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct page **pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct bio_vec *bv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (iov_iter_is_kvec(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) memcpy(&ctx->iter, iter, sizeof(*iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ctx->len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) iov_iter_advance(iter, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!bv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) bv = vmalloc(array_size(max_pages, sizeof(*bv)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!bv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (!pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) pages = vmalloc(array_size(max_pages, sizeof(*pages)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (!pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) kvfree(bv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) saved_len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) while (count && npages < max_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (rc > count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) iov_iter_advance(iter, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) count -= rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) rc += start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (npages + cur_npages > max_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) npages + cur_npages, max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) for (i = 0; i < cur_npages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) bv[npages + i].bv_page = pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) bv[npages + i].bv_offset = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) bv[npages + i].bv_len = len - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) rc -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) npages += cur_npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) kvfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ctx->bv = bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ctx->len = saved_len - count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ctx->npages = npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * cifs_alloc_hash - allocate hash and hash context together
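 * @name: name of the hash algorithm, passed through to the crypto API
 * @shash: where the allocated crypto_shash transform is returned
 * @sdesc: where the allocated hash descriptor is returned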
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * The caller has to make sure @sdesc is initialized to either NULL or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * a valid context. Both can be freed via cifs_free_hash().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) cifs_alloc_hash(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct crypto_shash **shash, struct sdesc **sdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (*sdesc != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) *shash = crypto_alloc_shash(name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (IS_ERR(*shash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) cifs_dbg(VFS, "Could not allocate crypto %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) rc = PTR_ERR(*shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) *shash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) *sdesc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) *sdesc = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (*sdesc == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) crypto_free_shash(*shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) *shash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) (*sdesc)->shash.tfm = *shash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * cifs_free_hash - free hash and hash context together
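 * @shash: the crypto_shash transform to free; set to NULL on return
 * @sdesc: the hash descriptor to free; set to NULL on return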
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * Freeing a NULL hash or context is safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) kfree(*sdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) *sdesc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (*shash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) crypto_free_shash(*shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) *shash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
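
/*
 * Minimal usage sketch for the two helpers above (not taken from an actual
 * caller; "md5", buf, len and digest are placeholders):
 *
 *	struct crypto_shash *shash = NULL;
 *	struct sdesc *sdesc = NULL;
 *	u8 digest[16];
 *	int rc;
 *
 *	rc = cifs_alloc_hash("md5", &shash, &sdesc);
 *	if (rc)
 *		return rc;
 *	rc = crypto_shash_init(&sdesc->shash);
 *	if (!rc)
 *		rc = crypto_shash_update(&sdesc->shash, buf, len);
 *	if (!rc)
 *		rc = crypto_shash_final(&sdesc->shash, digest);
 *	cifs_free_hash(&shash, &sdesc);
 */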
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * @rqst: the smb_rqst to examine
 * @page: index of the page within @rqst
 * @len: the length of the data in this page is returned here
 * @offset: the offset of the data within this page is returned here
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) unsigned int *len, unsigned int *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) *len = rqst->rq_pagesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *offset = (page == 0) ? rqst->rq_offset : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) *len = rqst->rq_tailsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) else if (page == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *len = rqst->rq_pagesz - rqst->rq_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
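
/*
 * Example (a sketch, not from an actual caller): the total number of bytes
 * carried by the page array of an smb_rqst can be computed as
 *
 *	unsigned int i, len, offset, total = 0;
 *
 *	for (i = 0; i < rqst->rq_npages; i++) {
 *		rqst_page_get_length(rqst, i, &len, &offset);
 *		total += len;
 *	}
 */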
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
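/*
 * extract_unc_hostname - return the hostname portion of a UNC path in *h and
 * its length in *len.  Leading '\' and '/' characters are skipped and the
 * result is not nul-terminated; it points into the string passed in.  For
 * example (sketch), given "\\srv\share", *h points at "srv\share" and *len
 * is 3.
 */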
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) void extract_unc_hostname(const char *unc, const char **h, size_t *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) const char *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* skip initial slashes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) while (*unc && (*unc == '\\' || *unc == '/'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) unc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) end = unc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) while (*end && !(*end == '\\' || *end == '/'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) end++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *h = unc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) *len = end - unc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * copy_path_name - copy src path to dst, possibly truncating
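 * @dst: destination buffer, which must have room for at least PATH_MAX bytes
 * @src: source path to copy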
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) *
 * Return: the number of bytes written, including the trailing nul
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int copy_path_name(char *dst, const char *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /*
	 * PATH_MAX includes the terminating nul, so if strlen(src) >= PATH_MAX
	 * strscpy() will truncate and strlen(dst) will be PATH_MAX - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) name_len = strscpy(dst, src, PATH_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (WARN_ON_ONCE(name_len < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) name_len = PATH_MAX-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* we count the trailing nul */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) name_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct super_cb_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct super_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
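/*
 * iterate_supers_type() callback: remember the first superblock whose master
 * tcon is connected through the TCP server passed in via super_cb_data.
 */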
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static void tcp_super_cb(struct super_block *sb, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct super_cb_data *sd = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct TCP_Server_Info *server = sd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct cifs_sb_info *cifs_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct cifs_tcon *tcon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (sd->sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) cifs_sb = CIFS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) tcon = cifs_sb_master_tcon(cifs_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (tcon->ses->server == server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) sd->sb = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct super_cb_data sd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) .data = data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) .sb = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) iterate_supers_type(&cifs_fs_type, f, &sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (!sd.sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /*
	 * Grab an active reference in order to prevent automounts (DFS links)
	 * from expiring and then freeing up our cifs superblock pointer while
	 * we're doing failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) cifs_sb_active(sd.sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return sd.sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static void __cifs_put_super(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (!IS_ERR_OR_NULL(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) cifs_sb_deactive(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
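/*
 * Return an actively referenced superblock that is mounted through @server,
 * or an ERR_PTR if none is found.  Drop the reference with
 * cifs_put_tcp_super().
 */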
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return __cifs_get_super(tcp_super_cb, server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) void cifs_put_tcp_super(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) __cifs_put_super(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #ifdef CONFIG_CIFS_DFS_UPCALL
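/*
 * match_target_ip - check whether a DFS target share is hosted on the same
 * address the server socket is connected to.  The target hostname is
 * resolved through the DNS upcall and the result of the address comparison
 * is returned in *result.
 */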
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int match_target_ip(struct TCP_Server_Info *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) const char *share, size_t share_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) bool *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) char *target, *tip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct sockaddr tipaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) *result = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) target = kzalloc(share_len + 3, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) rc = dns_resolve_server_name_to_ip(target, &tip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) cifs_dbg(VFS, "%s: failed to convert target ip address\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) &tipaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) kfree(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) kfree(tip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
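/*
 * iterate_supers_type() callback: remember the superblock whose original
 * full path matches the DFS path of the tcon passed in via super_cb_data.
 */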
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static void tcon_super_cb(struct super_block *sb, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct super_cb_data *sd = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct cifs_tcon *tcon = sd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct cifs_sb_info *cifs_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (sd->sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) cifs_sb = CIFS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (tcon->dfs_path && cifs_sb->origin_fullpath &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) sd->sb = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return __cifs_get_super(tcon_super_cb, tcon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static inline void cifs_put_tcon_super(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) __cifs_put_super(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static inline void cifs_put_tcon_super(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
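/*
 * update_super_prepath - replace the prefix path of the superblock mounted
 * from @tcon's DFS path and mark the mount as using a prefix path.
 */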
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) int update_super_prepath(struct cifs_tcon *tcon, char *prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct super_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct cifs_sb_info *cifs_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) sb = cifs_get_tcon_super(tcon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (IS_ERR(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return PTR_ERR(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) cifs_sb = CIFS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) kfree(cifs_sb->prepath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (prefix && *prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) cifs_sb->prepath = kstrndup(prefix, strlen(prefix), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (!cifs_sb->prepath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	} else {
		cifs_sb->prepath = NULL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) cifs_put_tcon_super(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }