Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * DFS referral cache routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/jhash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/ktime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/nls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include "cifsglob.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include "smb2pdu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include "smb2proto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include "cifsproto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include "cifs_debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "cifs_unicode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "smb2glob.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include "dfs_cache.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
/* Number of hash buckets; must stay a power of two for cache_entry_hash(). */
#define CACHE_HTABLE_SIZE 32
/* Upper bound on cached entries; remove_oldest_entry() evicts beyond this. */
#define CACHE_MAX_ENTRIES 64

/* True when the referral flags mark the entry as an interlink. */
#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
/* A single DFS target within a cache entry's target list. */
struct cache_dfs_tgt {
	char *name;		/* target name (owned; freed in free_tgts()) */
	int path_consumed;	/* referral's path_consumed for this target */
	struct list_head list;	/* link in cache_entry::tlist */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
/* One cached DFS referral, hashed by path in cache_htable. */
struct cache_entry {
	struct hlist_node hlist;	/* link in a cache_htable bucket */
	const char *path;		/* referral path (owned; freed in flush_cache_ent()) */
	int ttl;			/* time to live, seconds (refs[0].ttl) */
	int srvtype;			/* DFS_TYPE_ROOT or link */
	int flags;			/* DFSREF_* flags (see IS_INTERLINK_SET()) */
	struct timespec64 etime;	/* absolute expiry time (see get_expire_time()) */
	int path_consumed;		/* bytes of request path consumed */
	int numtgts;			/* number of targets in tlist */
	struct list_head tlist;		/* list of cache_dfs_tgt */
	struct cache_dfs_tgt *tgthint;	/* current target hint (head of tlist) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
/* Per-volume info tracked in vol_list. */
struct vol_info {
	char *fullpath;			/* full path of the volume */
	spinlock_t smb_vol_lock;	/* protects smb_vol */
	struct smb_vol smb_vol;		/* SMB mount volume information */
	char *mntdata;			/* mount data string */
	struct list_head list;		/* link in vol_list */
	struct list_head rlist;		/* NOTE(review): secondary list link — presumably used by the refresh path; confirm against refresh_cache_worker() */
	struct kref refcnt;		/* reference count */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
/* Slab for struct cache_entry allocations */
static struct kmem_cache *cache_slab __read_mostly;
/* Workqueue running refresh_task */
static struct workqueue_struct *dfscache_wq __read_mostly;

/* Smallest TTL among cached entries; drives the refresh_task period */
static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);	/* protects cache_ttl */

/* NLS table loaded in dfs_cache_init() via load_nls_default() */
static struct nls_table *cache_nlsc;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

/* Hash table of cache entries; htable_rw_lock guards all accesses */
static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

/* Registered volumes; protected by vol_list_lock */
static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);

static void refresh_cache_worker(struct work_struct *work);

/* Delayed work that runs refresh_cache_worker() on dfscache_wq */
static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) static int get_normalized_path(const char *path, char **npath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	if (*path == '\\') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		*npath = (char *)path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		if (!*npath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		convert_delimiter(*npath, '\\');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
/* Free @npath only if get_normalized_path() actually made a copy. */
static inline void free_normalized_path(const char *path, char *npath)
{
	if (path == npath)
		return;
	kfree(npath);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) static inline bool cache_entry_expired(const struct cache_entry *ce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	ktime_get_coarse_real_ts64(&ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	return timespec64_compare(&ts, &ce->etime) >= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) static inline void free_tgts(struct cache_entry *ce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	struct cache_dfs_tgt *t, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	list_for_each_entry_safe(t, n, &ce->tlist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 		list_del(&t->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 		kfree(t->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 		kfree(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
/*
 * Tear down one cache entry: unhash it, free its path string and target
 * list, drop the global entry count, and return the object to the slab.
 * Callers hold htable_rw_lock for writing (see dfscache_proc_write()).
 */
static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) static void flush_cache_ents(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		struct hlist_head *l = &cache_htable[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		struct hlist_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 		struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 		hlist_for_each_entry_safe(ce, n, l, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 			if (!hlist_unhashed(&ce->hlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 				flush_cache_ent(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	/* Read-lock the table and print every hashed entry with its targets. */
	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
				   "interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path,
				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec,
				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
				   ce->path_consumed,
				   cache_entry_expired(ce) ? "yes" : "no");

			/* One indented line per target; mark the current hint. */
			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 				   size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	char c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	rc = get_user(c, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	if (c != '0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	cifs_dbg(FYI, "clearing dfs cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	down_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	flush_cache_ents();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	up_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 
/* Open handler: bind dfscache_proc_show() as a single-shot seq file. */
static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
/* proc file operations for the DFS cache (show/clear). */
const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) #ifdef CONFIG_CIFS_DEBUG2
/* Debug: log every target of @ce, flagging the current hint. */
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
/* Debug: log one cache entry's fields followed by its target list. */
static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
/* Debug: log all fields of each referral returned by the server. */
static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
/* No-op stubs when CONFIG_CIFS_DEBUG2 is disabled. */
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	/* Single-concurrency (max_active = 1) workqueue for refresh_task. */
	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	/* Default NLS table used for charset conversions. */
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) static inline unsigned int cache_entry_hash(const void *data, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	unsigned int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	h = jhash(data, size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	return h & (CACHE_HTABLE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 
/*
 * Check whether second path component of @path is SYSVOL or NETLOGON
 * (case-insensitive).  @path must be non-empty and start with its
 * separator character ('\\' or '/').
 *
 * Returns false for a path with no second separator instead of
 * dereferencing strchr()'s NULL return (NULL + 1 was undefined behavior
 * in the original).
 */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep);
	if (!s)
		return false;
	s++;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) /* Return target hint of a DFS cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) static inline char *get_tgt_name(const struct cache_entry *ce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	struct cache_dfs_tgt *t = ce->tgthint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	return t ? t->name : ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) /* Return expire time out of a new entry's TTL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) static inline struct timespec64 get_expire_time(int ttl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	struct timespec64 ts = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		.tv_sec = ttl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 		.tv_nsec = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	struct timespec64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	ktime_get_coarse_real_ts64(&now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	return timespec64_add(now, ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) /* Allocate a new DFS target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	struct cache_dfs_tgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	t = kmalloc(sizeof(*t), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	if (!t->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		kfree(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	t->path_consumed = path_consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	INIT_LIST_HEAD(&t->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	return t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 *
 * Entry-wide fields (ttl, srvtype, flags, path_consumed) are taken from
 * the first referral.  If @tgthint matches one of the target names, that
 * target is inserted at the head of the list so it becomes the new hint;
 * otherwise the first target added ends up as the hint.
 *
 * Returns 0 on success or a negative errno; on failure all targets added
 * so far are freed.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			/* Matched hint goes first; clear @tgthint so at most one matches. */
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	/* The head of the target list becomes the hint (NULL if list empty). */
	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) /* Allocate a new cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) static struct cache_entry *alloc_cache_entry(const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 					     const struct dfs_info3_param *refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 					     int numrefs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	if (!ce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	if (!ce->path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		kmem_cache_free(cache_slab, ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	INIT_HLIST_NODE(&ce->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	INIT_LIST_HEAD(&ce->tlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	rc = copy_ref_data(refs, numrefs, ce, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		kfree(ce->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		kmem_cache_free(cache_slab, ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 		ce = ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	return ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) /* Must be called with htable_rw_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) static void remove_oldest_entry(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	struct cache_entry *to_del = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		struct hlist_head *l = &cache_htable[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		hlist_for_each_entry(ce, l, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			if (hlist_unhashed(&ce->hlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			if (!to_del || timespec64_compare(&ce->etime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 							  &to_del->etime) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 				to_del = ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	if (!to_del) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	cifs_dbg(FYI, "%s: removing entry\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	dump_ce(to_del);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	flush_cache_ent(to_del);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) /* Add a new DFS cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) static int add_cache_entry(const char *path, unsigned int hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 			   struct dfs_info3_param *refs, int numrefs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	ce = alloc_cache_entry(path, refs, numrefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	if (IS_ERR(ce))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		return PTR_ERR(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	spin_lock(&cache_ttl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	if (!cache_ttl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		cache_ttl = ce->ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		cache_ttl = min_t(int, cache_ttl, ce->ttl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	spin_unlock(&cache_ttl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	down_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	dump_ce(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	up_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) static struct cache_entry *__lookup_cache_entry(const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	unsigned int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	h = cache_entry_hash(path, strlen(path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		if (!strcasecmp(path, ce->path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 			dump_ce(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		ce = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	return ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * When @hash is non-NULL, the bucket hash of the last candidate path tried is
 * stored there (usable by callers for a later insertion of @path).
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
{
	struct cache_entry *ce = ERR_PTR(-ENOENT);
	unsigned int h;
	int cnt = 0;
	char *npath;
	char *s, *e;
	char sep;

	/* Private copy so path components can be NUL-terminated in place */
	npath = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	/*
	 * The separator is taken from the first character of the path
	 * (e.g. '\' in "\\server\share\dir"); skip past the first three
	 * separators, i.e. past the "\\server\share" prefix.
	 */
	s = npath;
	sep = *npath;
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		/* At most two path components: only an exact match can apply */
		h = cache_entry_hash(path, strlen(path));
		ce = __lookup_cache_entry(path);
		goto out;
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	h = cache_entry_hash(npath, strlen(npath));
	e = npath + strlen(npath) - 1;
	while (e > s) {
		char tmp;

		/* skip trailing separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			goto out;

		/* Temporarily truncate npath right after the current component */
		tmp = *(e+1);
		*(e+1) = 0;

		ce = __lookup_cache_entry(npath);
		if (!IS_ERR(ce)) {
			/* Found a prefix match; report its bucket hash */
			h = cache_entry_hash(npath, strlen(npath));
			break;
		}

		/* Restore the byte, drop the last component, and retry */
		*(e+1) = tmp;
		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
out:
	if (hash)
		*hash = h;
	kfree(npath);
	return ce;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) static void __vol_release(struct vol_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	kfree(vi->fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	kfree(vi->mntdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	cifs_cleanup_volume_info_contents(&vi->smb_vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	kfree(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) static void vol_release(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	spin_lock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	list_del(&vi->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	spin_unlock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	__vol_release(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) static inline void free_vol_list(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	struct vol_info *vi, *nvi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		list_del_init(&vi->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		__vol_release(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
/**
 * dfs_cache_destroy - destroy DFS referral cache
 *
 * Cancels the periodic refresh worker, releases the nls table, the volume
 * list, all cached entries, the entry slab, and finally the workqueue.
 * The delayed work is cancelled (and waited for) first so it cannot run
 * against state freed below.
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) /* Must be called with htable_rw_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) static int __update_cache_entry(const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 				const struct dfs_info3_param *refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 				int numrefs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	char *s, *th = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	ce = lookup_cache_entry(path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	if (IS_ERR(ce))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		return PTR_ERR(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	if (ce->tgthint) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		s = ce->tgthint->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		th = kstrndup(s, strlen(s), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		if (!th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	free_tgts(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	ce->numtgts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	rc = copy_ref_data(refs, numrefs, ce, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	kfree(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			    const struct nls_table *nls_codepage, int remap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			    const char *path,  struct dfs_info3_param **refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 			    int *numrefs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	if (unlikely(!nls_codepage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	*refs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	*numrefs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 					       nls_codepage, remap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) /* Update an expired cache entry by getting a new DFS referral from server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) static int update_cache_entry(const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 			      const struct dfs_info3_param *refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			      int numrefs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	down_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	rc = __update_cache_entry(path, refs, numrefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	up_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	/* @hash depends only on @path, so it stays usable after the lock drop below */
	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		/* Fresh entry: nothing to do */
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * No entry was found.
	 *
	 * Request a new DFS referral in order to create a new cache entry, or
	 * updating an existing one.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		/* Entry exists but expired: refresh it in place */
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	/*
	 * NOTE(review): the read lock was dropped above, so another task could
	 * have inserted an entry for @path by now, in which case this path
	 * would add a duplicate — confirm whether callers serialize lookups
	 * per path.
	 */
	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
			 __func__, CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  * Set up a DFS referral from a given cache entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  * Must be called with htable_rw_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) static int setup_referral(const char *path, struct cache_entry *ce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			  struct dfs_info3_param *ref, const char *target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	cifs_dbg(FYI, "%s: set up new ref\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	memset(ref, 0, sizeof(*ref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	if (!ref->path_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (!ref->node_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		goto err_free_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	ref->path_consumed = ce->path_consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	ref->ttl = ce->ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	ref->server_type = ce->srvtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	ref->ref_flag = ce->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) err_free_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	kfree(ref->path_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	ref->path_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) /* Return target list of a DFS cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct list_head *head = &tl->tl_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct cache_dfs_tgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	struct dfs_cache_tgt_iterator *it, *nit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	memset(tl, 0, sizeof(*tl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	INIT_LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	list_for_each_entry(t, &ce->tlist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		it = kzalloc(sizeof(*it), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		if (!it) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			goto err_free_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		if (!it->it_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			kfree(it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			goto err_free_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		it->it_path_consumed = t->path_consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		if (ce->tgthint == t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			list_add(&it->it_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			list_add_tail(&it->it_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	tl->tl_numtgts = ce->numtgts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) err_free_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	list_for_each_entry_safe(it, nit, head, it_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		kfree(it->it_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		kfree(it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  * dfs_cache_find - find a DFS cache entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * If it doesn't find the cache entry, then it will get a DFS referral
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * for @path and create a new entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * In case the cache entry exists but expired, it will get a DFS referral
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * for @path and then update the respective cache entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * These parameters are passed down to the get_dfs_refer() call if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  * needs to be issued:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * @xid: syscall xid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * @ses: smb session to issue the request on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * @nls_codepage: charset conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * @remap: path character remapping type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * @path: path to lookup in DFS referral cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * @ref: when non-NULL, store single DFS referral result in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  * @tgt_list: when non-NULL, store complete DFS target list in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  * Return zero if the target was found, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		   const struct nls_table *nls_codepage, int remap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		   const char *path, struct dfs_info3_param *ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		   struct dfs_cache_tgt_list *tgt_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	char *npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	rc = get_normalized_path(path, &npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		goto out_free_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	down_read(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	ce = lookup_cache_entry(npath, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (IS_ERR(ce)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		up_read(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		rc = PTR_ERR(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		goto out_free_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (!rc && tgt_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		rc = get_targets(ce, tgt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	up_read(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) out_free_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	free_normalized_path(path, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  * the currently connected server.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  * NOTE: This function will neither update a cache entry in case it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * expired, nor create a new cache entry if @path hasn't been found. It heavily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * relies on an existing cache entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  * @path: path to lookup in the DFS referral cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * @ref: when non-NULL, store single DFS referral result in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * @tgt_list: when non-NULL, store complete DFS target list in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  * Return 0 if successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * Return -ENOENT if the entry was not found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * Return non-zero for other errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			 struct dfs_cache_tgt_list *tgt_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	char *npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	rc = get_normalized_path(path, &npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	down_read(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	ce = lookup_cache_entry(npath, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (IS_ERR(ce)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		rc = PTR_ERR(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (!rc && tgt_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		rc = get_targets(ce, tgt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	up_read(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	free_normalized_path(path, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  * dfs_cache_update_tgthint - update target hint of a DFS cache entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  * If it doesn't find the cache entry, then it will get a DFS referral for @path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  * and create a new entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  * In case the cache entry exists but expired, it will get a DFS referral
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * for @path and then update the respective cache entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * @xid: syscall id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  * @ses: smb session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * @nls_codepage: charset conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  * @remap: type of character remapping for paths
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  * @path: path to lookup in DFS referral cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  * @it: DFS target iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  * Return zero if the target hint was updated successfully, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			     const struct nls_table *nls_codepage, int remap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			     const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			     const struct dfs_cache_tgt_iterator *it)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	char *npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	struct cache_dfs_tgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	rc = get_normalized_path(path, &npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		goto out_free_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	down_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	ce = lookup_cache_entry(npath, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (IS_ERR(ce)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		rc = PTR_ERR(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	t = ce->tgthint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	if (likely(!strcasecmp(it->it_name, t->name)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	list_for_each_entry(t, &ce->tlist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		if (!strcasecmp(t->name, it->it_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			ce->tgthint = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				 it->it_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	up_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) out_free_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	free_normalized_path(path, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * without sending any requests to the currently connected server.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  * NOTE: This function will neither update a cache entry in case it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  * expired, nor create a new cache entry if @path hasn't been found. It heavily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  * relies on an existing cache entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  * @path: path to lookup in DFS referral cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  * @it: target iterator which contains the target hint to update the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  * entry with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  * Return zero if the target hint was updated successfully, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int dfs_cache_noreq_update_tgthint(const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				   const struct dfs_cache_tgt_iterator *it)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	char *npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	struct cache_dfs_tgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (!it)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	rc = get_normalized_path(path, &npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	down_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	ce = lookup_cache_entry(npath, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (IS_ERR(ce)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		rc = PTR_ERR(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	t = ce->tgthint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (unlikely(!strcasecmp(it->it_name, t->name)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	list_for_each_entry(t, &ce->tlist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		if (!strcasecmp(t->name, it->it_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			ce->tgthint = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				 it->it_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	up_write(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	free_normalized_path(path, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * target iterator (@it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * @path: path to lookup in DFS referral cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * @it: DFS target iterator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * @ref: DFS referral pointer to set up the gathered information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  * Return zero if the DFS referral was set up correctly, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) int dfs_cache_get_tgt_referral(const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			       const struct dfs_cache_tgt_iterator *it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			       struct dfs_info3_param *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	char *npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	struct cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	if (!it || !ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	rc = get_normalized_path(path, &npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	down_read(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	ce = lookup_cache_entry(npath, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	if (IS_ERR(ce)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		rc = PTR_ERR(ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	rc = setup_referral(path, ce, ref, it->it_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	up_read(&htable_rw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	free_normalized_path(path, npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	memcpy(new, vol, sizeof(*new));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	if (vol->username) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		new->username = kstrndup(vol->username, strlen(vol->username),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		if (!new->username)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (vol->password) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		new->password = kstrndup(vol->password, strlen(vol->password),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		if (!new->password)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			goto err_free_username;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (vol->UNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		if (!new->UNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			goto err_free_password;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	if (vol->domainname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		new->domainname = kstrndup(vol->domainname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 					   strlen(vol->domainname), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		if (!new->domainname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			goto err_free_unc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (vol->iocharset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		new->iocharset = kstrndup(vol->iocharset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 					  strlen(vol->iocharset), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		if (!new->iocharset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			goto err_free_domainname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (vol->prepath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		if (!new->prepath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			goto err_free_iocharset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) err_free_iocharset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	kfree(new->iocharset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) err_free_domainname:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	kfree(new->domainname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) err_free_unc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	kfree(new->UNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) err_free_password:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	kfree_sensitive(new->password);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) err_free_username:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	kfree(new->username);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	kfree(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)  * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  * DFS cache refresh worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  * @mntdata: mount data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  * @vol: cifs volume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  * @fullpath: origin full path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  * Return zero if volume was set up correctly, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct vol_info *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (!vol || !fullpath || !mntdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (!vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (!vi->fullpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		goto err_free_vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	rc = dup_vol(vol, &vi->smb_vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		goto err_free_fullpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	vi->mntdata = mntdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	spin_lock_init(&vi->smb_vol_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	kref_init(&vi->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	spin_lock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	list_add_tail(&vi->list, &vol_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	spin_unlock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) err_free_fullpath:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	kfree(vi->fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) err_free_vi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	kfree(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* Must be called with vol_list_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static struct vol_info *find_vol(const char *fullpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	struct vol_info *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	list_for_each_entry(vi, &vol_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		if (!strcasecmp(vi->fullpath, fullpath))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			return vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  * dfs_cache_update_vol - update vol info in DFS cache after failover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * @fullpath: fullpath to look up in volume list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  * @server: TCP ses pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)  * Return zero if volume was updated, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	struct vol_info *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	if (!fullpath || !server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	spin_lock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	vi = find_vol(fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	if (IS_ERR(vi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		spin_unlock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		return PTR_ERR(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	kref_get(&vi->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	spin_unlock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	spin_lock(&vi->smb_vol_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	       sizeof(vi->smb_vol.dstaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	spin_unlock(&vi->smb_vol_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	kref_put(&vi->refcnt, vol_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  * dfs_cache_del_vol - remove volume info in DFS cache during umount()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  * @fullpath: fullpath to look up in volume list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) void dfs_cache_del_vol(const char *fullpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	struct vol_info *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (!fullpath || !*fullpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	spin_lock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	vi = find_vol(fullpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	spin_unlock(&vol_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	if (!IS_ERR(vi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		kref_put(&vi->refcnt, vol_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * dfs_cache_get_tgt_share - parse a DFS target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  * @path: DFS full path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * @it: DFS target iterator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  * @share: tree name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  * @prefix: prefix path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  * Return zero if target was parsed correctly, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			    char **share, char **prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	char *s, sep, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	size_t plen1, plen2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	*share = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	*prefix = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	sep = it->it_name[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (sep != '\\' && sep != '/')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	s = strchr(it->it_name + 1, sep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	/* point to prefix in target node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	s = strchrnul(s + 1, sep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	/* extract target share */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (!*share)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	/* skip separator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (*s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		s++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	/* point to prefix in DFS path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	p = path + it->it_path_consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (*p == sep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	/* merge prefix paths from DFS path and target node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	plen1 = it->it_name + strlen(it->it_name) - s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	plen2 = path + strlen(path) - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	if (plen1 || plen2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		len = plen1 + plen2 + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		*prefix = kmalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		if (!*prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			kfree(*share);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			*share = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		if (plen1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			strscpy(*prefix, p, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			/* only connected tcons that were mounted via a DFS path */
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				/* pin the tcon; caller drops it via cifs_put_tcon() */
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
		/*
		 * NOTE(review): unlike the regular tcons above, the IPC tcon is
		 * queued without bumping tc_count, yet the caller puts every
		 * list entry the same way — presumably the session pins the IPC
		 * tcon; confirm against cifs_put_tcon()'s handling of tcon_ipc.
		 */
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
/*
 * Tell whether @path refers to a DFS link rather than a DFS root.
 *
 * A link has at least one extra '\\'-separated component after the
 * \server\share prefix, i.e. two separators past the leading one.
 */
static bool is_dfs_link(const char *path)
{
	const char *share = strchr(path + 1, '\\');

	return share && strchr(share + 1, '\\');
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static char *get_dfs_root(const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	char *s, *npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	s = strchr(path + 1, '\\');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	s = strchr(s + 1, '\\');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	npath = kstrndup(path, s - path, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (!npath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	return npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
/*
 * Drop the server reference obtained from get_tcp_server().
 * The 0 argument is presumably "not called from the reconnect path" —
 * confirm against cifs_put_tcp_session()'s parameter.
 */
static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	struct TCP_Server_Info *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	server = cifs_find_tcp_session(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (IS_ERR_OR_NULL(server))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	spin_lock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (server->tcpStatus != CifsGood) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		put_tcp_server(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	spin_unlock(&GlobalMid_Lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	return server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
/*
 * Find root SMB session out of a DFS link path.
 *
 * Looks up the cached referral for the \server\share root of @path, rebuilds
 * mount options from the original volume (@vi) plus that referral, and
 * establishes/reuses an SMB session against the root server.
 *
 * Returns a session pointer (caller releases with cifs_put_smb_ses()) or
 * ERR_PTR on failure.
 */
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL, *devname = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb_vol vol = {NULL};

	/* \server\share prefix of the DFS link; freed in the out path */
	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	/* copy the referral out while the cache entry is still locked */
	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
					   &devname);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		/* clear so the out path doesn't kfree() an ERR_PTR */
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&vol, mdata, devname, false);
	kfree(devname);

	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&vol);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	/*
	 * NOTE(review): the server reference from get_tcp_server() is assumed
	 * to be consumed by cifs_get_smb_ses() on both success and failure —
	 * confirm; there is no explicit put_tcp_server() on this path.
	 */
	ses = cifs_get_smb_ses(server, &vol);

out:
	cifs_cleanup_volume_info_contents(&vol);
	kfree(mdata);
	kfree(rpath);

	return ses;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
/*
 * Refresh DFS cache entry from a given tcon.
 *
 * If the cached entry for the tcon's DFS path has expired, re-request the
 * referral (over the root session for DFS links, the tcon's own session
 * otherwise) and update the cache with the fresh result.
 *
 * Returns 0 on success or if the entry was still fresh; negative errno
 * otherwise.
 */
static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
	int rc = 0;
	unsigned int xid;
	char *path, *npath;
	struct cache_entry *ce;
	struct cifs_ses *root_ses = NULL, *ses;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	xid = get_xid();

	/* skip the leading separator of the stored DFS path */
	path = tcon->dfs_path + 1;

	rc = get_normalized_path(path, &npath);
	if (rc)
		goto out_free_xid;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	/* nothing to do while the entry's TTL has not elapsed */
	if (!cache_entry_expired(ce)) {
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	up_read(&htable_rw_lock);

	/* If it's a DFS Link, then use root SMB session for refreshing it */
	if (is_dfs_link(npath)) {
		ses = root_ses = find_root_ses(vi, tcon, npath);
		if (IS_ERR(ses)) {
			rc = PTR_ERR(ses);
			/* nothing to put in the error path below */
			root_ses = NULL;
			goto out_free_path;
		}
	} else {
		ses = tcon->ses;
	}

	/* re-request the referral and replace the cached entry with it */
	rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
			      &numrefs);
	if (!rc) {
		dump_refs(refs, numrefs);
		rc = update_cache_entry(npath, refs, numrefs);
		free_dfs_info_array(refs, numrefs);
	}

	/* only set when find_root_ses() succeeded above */
	if (root_ses)
		cifs_put_smb_ses(root_ses);

out_free_path:
	free_normalized_path(path, npath);

out_free_xid:
	free_xid(xid);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 *
 * Runs in two passes: first collect (and pin) eligible volumes under
 * vol_list_lock, then — lock-free with respect to vol_list — walk each
 * volume's tcons and refresh any expired cache entries, finally re-arming
 * itself.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->smb_vol);
		if (!server)
			continue;

		/* pin the vol so it survives until the second pass below */
		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		/* smb_vol may be updated concurrently; sample under its lock */
		spin_lock(&vi->smb_vol_lock);
		server = get_tcp_server(&vi->smb_vol);
		spin_unlock(&vi->smb_vol_lock);

		if (!server)
			goto next_vol;

		/* get_tcons() pins each regular tcon; all are put below */
		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to refresh
			 * (possibily due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	/* re-arm using cache_ttl (lowest referral TTL seen; see header) */
	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}