// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "fs_context.h"

#include "dfs_cache.h"

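/*
 * The referral cache is a fixed-size hash table of cache_entry structures
 * keyed by DFS path. Each entry holds the referral flags, a TTL-based
 * expiry time and the list of possible targets; the delayed refresh_task
 * worker is (re)scheduled from the smallest TTL seen so far.
 */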
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))

struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

struct vol_info {
	char *fullpath;
	spinlock_t ctx_lock;
	struct smb3_fs_context ctx;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);

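/*
 * Normalize @path into the backslash-separated form used as cache key.
 * A '\\'-prefixed path is returned as is; otherwise a converted copy is
 * allocated and must be released with free_normalized_path().
 */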
static int get_normalized_path(const char *path, char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}

static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}

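/* Return true if a cache entry's expiry time (etime) has already passed */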
static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

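/* Unhash a cache entry and free its path, target list and slab object */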
static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, " %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

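/* Writing '0' to the dfscache proc file flushes all cached referrals */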
static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, " %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

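/* Hash a DFS path into a bucket index (jhash masked to the table size) */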
static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}

/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}

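/*
 * Look up an exact (case-insensitive) path match in its hash bucket.
 * Must be called with htable_rw_lock held.
 */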
static struct cache_entry *__lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);
	return ce;
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
{
	struct cache_entry *ce = ERR_PTR(-ENOENT);
	unsigned int h;
	int cnt = 0;
	char *npath;
	char *s, *e;
	char sep;

	npath = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	s = npath;
	sep = *npath;
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		h = cache_entry_hash(path, strlen(path));
		ce = __lookup_cache_entry(path);
		goto out;
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	h = cache_entry_hash(npath, strlen(npath));
	e = npath + strlen(npath) - 1;
	while (e > s) {
		char tmp;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			goto out;

		tmp = *(e+1);
		*(e+1) = 0;

		ce = __lookup_cache_entry(npath);
		if (!IS_ERR(ce)) {
			h = cache_entry_hash(npath, strlen(npath));
			break;
		}

		*(e+1) = tmp;
		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
out:
	if (hash)
		*hash = h;
	kfree(npath);
	return ce;
}

Paulo Alcantara54be1f62018-11-14 16:01:21 -0200587{
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300588 kfree(vi->fullpath);
589 kfree(vi->mntdata);
Ronnie Sahlbergc741cba2020-12-14 16:40:16 +1000590 smb3_cleanup_fs_context_contents(&vi->ctx);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200591 kfree(vi);
592}
593
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -0300594static void vol_release(struct kref *kref)
595{
596 struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
597
598 spin_lock(&vol_list_lock);
599 list_del(&vi->list);
600 spin_unlock(&vol_list_lock);
601 __vol_release(vi);
602}
603
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200604static inline void free_vol_list(void)
605{
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300606 struct vol_info *vi, *nvi;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200607
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -0300608 list_for_each_entry_safe(vi, nvi, &vol_list, list) {
609 list_del_init(&vi->list);
610 __vol_release(vi);
611 }
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200612}
613
614/**
615 * dfs_cache_destroy - destroy DFS referral cache
616 */
617void dfs_cache_destroy(void)
618{
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300619 cancel_delayed_work_sync(&refresh_task);
620 unload_nls(cache_nlsc);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200621 free_vol_list();
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200622 flush_cache_ents();
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300623 kmem_cache_destroy(cache_slab);
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300624 destroy_workqueue(dfscache_wq);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200625
626 cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
627}
628
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300629/* Must be called with htable_rw_lock held */
630static int __update_cache_entry(const char *path,
631 const struct dfs_info3_param *refs,
632 int numrefs)
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200633{
634 int rc;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300635 struct cache_entry *ce;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200636 char *s, *th = NULL;
637
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300638 ce = lookup_cache_entry(path, NULL);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200639 if (IS_ERR(ce))
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300640 return PTR_ERR(ce);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200641
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300642 if (ce->tgthint) {
643 s = ce->tgthint->name;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300644 th = kstrndup(s, strlen(s), GFP_ATOMIC);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200645 if (!th)
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300646 return -ENOMEM;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200647 }
648
649 free_tgts(ce);
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300650 ce->numtgts = 0;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200651
652 rc = copy_ref_data(refs, numrefs, ce, th);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300653
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200654 kfree(th);
655
YueHaibingeecfc572020-01-17 10:21:56 +0800656 return rc;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300657}
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200658
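/* Ask the server for a new DFS referral for @path via ->get_dfs_refer() */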
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300659static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
660 const struct nls_table *nls_codepage, int remap,
661 const char *path, struct dfs_info3_param **refs,
662 int *numrefs)
663{
664 cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
665
666 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
667 return -EOPNOTSUPP;
668 if (unlikely(!nls_codepage))
669 return -EINVAL;
670
671 *refs = NULL;
672 *numrefs = 0;
673
674 return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
675 nls_codepage, remap);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200676}
677
678/* Update an expired cache entry by getting a new DFS referral from server */
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300679static int update_cache_entry(const char *path,
680 const struct dfs_info3_param *refs,
681 int numrefs)
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200682{
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300683
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200684 int rc;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200685
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300686 down_write(&htable_rw_lock);
687 rc = __update_cache_entry(path, refs, numrefs);
688 up_write(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200689
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300690 return rc;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200691}
692
693/*
694 * Find, create or update a DFS cache entry.
695 *
696 * If the entry wasn't found, it will create a new one. Or if it was found but
697 * expired, then it will update the entry accordingly.
698 *
699 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
700 * handle them properly.
701 */
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300702static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
703 const struct nls_table *nls_codepage, int remap,
704 const char *path, bool noreq)
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200705{
706 int rc;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300707 unsigned int hash;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300708 struct cache_entry *ce;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300709 struct dfs_info3_param *refs = NULL;
710 int numrefs = 0;
711 bool newent = false;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200712
713 cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
714
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300715 down_read(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200716
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300717 ce = lookup_cache_entry(path, &hash);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200718
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300719 /*
720 * If @noreq is set, no requests will be sent to the server. Just return
721 * the cache entry.
722 */
723 if (noreq) {
724 up_read(&htable_rw_lock);
Chen Zhou050d2a82020-01-22 18:20:30 +0800725 return PTR_ERR_OR_ZERO(ce);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200726 }
727
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300728 if (!IS_ERR(ce)) {
729 if (!cache_entry_expired(ce)) {
730 dump_ce(ce);
731 up_read(&htable_rw_lock);
732 return 0;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200733 }
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300734 } else {
735 newent = true;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200736 }
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300737
738 up_read(&htable_rw_lock);
739
740 /*
741 * No entry was found.
742 *
743 * Request a new DFS referral in order to create a new cache entry, or
744 * updating an existing one.
745 */
746 rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
747 &refs, &numrefs);
748 if (rc)
749 return rc;
750
751 dump_refs(refs, numrefs);
752
753 if (!newent) {
754 rc = update_cache_entry(path, refs, numrefs);
755 goto out_free_refs;
756 }
757
758 if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
Joe Perchesa0a30362020-04-14 22:42:53 -0700759 cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
760 __func__, CACHE_MAX_ENTRIES);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300761 down_write(&htable_rw_lock);
762 remove_oldest_entry();
763 up_write(&htable_rw_lock);
764 }
765
766 rc = add_cache_entry(path, hash, refs, numrefs);
767 if (!rc)
768 atomic_inc(&cache_count);
769
770out_free_refs:
771 free_dfs_info_array(refs, numrefs);
772 return rc;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200773}
774
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300775/*
776 * Set up a DFS referral from a given cache entry.
777 *
778 * Must be called with htable_rw_lock held.
779 */
780static int setup_referral(const char *path, struct cache_entry *ce,
781 struct dfs_info3_param *ref, const char *target)
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200782{
783 int rc;
784
785 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
786
787 memset(ref, 0, sizeof(*ref));
788
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300789 ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200790 if (!ref->path_name)
791 return -ENOMEM;
792
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300793 ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200794 if (!ref->node_name) {
795 rc = -ENOMEM;
796 goto err_free_path;
797 }
798
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300799 ref->path_consumed = ce->path_consumed;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300800 ref->ttl = ce->ttl;
801 ref->server_type = ce->srvtype;
Paulo Alcantara5ff28362021-02-24 20:59:23 -0300802 ref->ref_flag = ce->ref_flags;
803 ref->flags = ce->hdr_flags;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200804
805 return 0;
806
807err_free_path:
808 kfree(ref->path_name);
809 ref->path_name = NULL;
810 return rc;
811}
812
813/* Return target list of a DFS cache entry */
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300814static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200815{
816 int rc;
817 struct list_head *head = &tl->tl_list;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300818 struct cache_dfs_tgt *t;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200819 struct dfs_cache_tgt_iterator *it, *nit;
820
821 memset(tl, 0, sizeof(*tl));
822 INIT_LIST_HEAD(head);
823
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300824 list_for_each_entry(t, &ce->tlist, list) {
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300825 it = kzalloc(sizeof(*it), GFP_ATOMIC);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200826 if (!it) {
827 rc = -ENOMEM;
828 goto err_free_it;
829 }
830
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300831 it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200832 if (!it->it_name) {
Dan Carpenterc715f892019-01-05 21:18:03 +0300833 kfree(it);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200834 rc = -ENOMEM;
835 goto err_free_it;
836 }
Paulo Alcantara7548e1d2020-07-21 09:36:42 -0300837 it->it_path_consumed = t->path_consumed;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200838
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300839 if (ce->tgthint == t)
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200840 list_add(&it->it_list, head);
841 else
842 list_add_tail(&it->it_list, head);
843 }
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300844
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300845 tl->tl_numtgts = ce->numtgts;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200846
847 return 0;
848
849err_free_it:
850 list_for_each_entry_safe(it, nit, head, it_list) {
851 kfree(it->it_name);
852 kfree(it);
853 }
854 return rc;
855}
856
857/**
858 * dfs_cache_find - find a DFS cache entry
859 *
860 * If it doesn't find the cache entry, then it will get a DFS referral
861 * for @path and create a new entry.
862 *
863 * In case the cache entry exists but expired, it will get a DFS referral
864 * for @path and then update the respective cache entry.
865 *
866 * These parameters are passed down to the get_dfs_refer() call if it
867 * needs to be issued:
868 * @xid: syscall xid
869 * @ses: smb session to issue the request on
870 * @nls_codepage: charset conversion
871 * @remap: path character remapping type
872 * @path: path to lookup in DFS referral cache.
873 *
874 * @ref: when non-NULL, store single DFS referral result in it.
875 * @tgt_list: when non-NULL, store complete DFS target list in it.
876 *
877 * Return zero if the target was found, otherwise non-zero.
878 */
879int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
880 const struct nls_table *nls_codepage, int remap,
881 const char *path, struct dfs_info3_param *ref,
882 struct dfs_cache_tgt_list *tgt_list)
883{
884 int rc;
885 char *npath;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300886 struct cache_entry *ce;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200887
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200888 rc = get_normalized_path(path, &npath);
889 if (rc)
890 return rc;
891
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300892 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
893 if (rc)
894 goto out_free_path;
895
896 down_read(&htable_rw_lock);
897
898 ce = lookup_cache_entry(npath, NULL);
899 if (IS_ERR(ce)) {
900 up_read(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200901 rc = PTR_ERR(ce);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300902 goto out_free_path;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200903 }
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300904
905 if (ref)
906 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
907 else
908 rc = 0;
909 if (!rc && tgt_list)
910 rc = get_targets(ce, tgt_list);
911
912 up_read(&htable_rw_lock);
913
914out_free_path:
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200915 free_normalized_path(path, npath);
916 return rc;
917}
918
919/**
920 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
921 * the currently connected server.
922 *
923 * NOTE: This function will neither update a cache entry in case it was
924 * expired, nor create a new cache entry if @path hasn't been found. It heavily
925 * relies on an existing cache entry.
926 *
927 * @path: path to lookup in the DFS referral cache.
928 * @ref: when non-NULL, store single DFS referral result in it.
929 * @tgt_list: when non-NULL, store complete DFS target list in it.
930 *
931 * Return 0 if successful.
932 * Return -ENOENT if the entry was not found.
933 * Return non-zero for other errors.
934 */
935int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
936 struct dfs_cache_tgt_list *tgt_list)
937{
938 int rc;
939 char *npath;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300940 struct cache_entry *ce;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200941
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200942 rc = get_normalized_path(path, &npath);
943 if (rc)
944 return rc;
945
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300946 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
947
948 down_read(&htable_rw_lock);
949
950 ce = lookup_cache_entry(npath, NULL);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200951 if (IS_ERR(ce)) {
952 rc = PTR_ERR(ce);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300953 goto out_unlock;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200954 }
955
956 if (ref)
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300957 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200958 else
959 rc = 0;
960 if (!rc && tgt_list)
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300961 rc = get_targets(ce, tgt_list);
962
963out_unlock:
964 up_read(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200965 free_normalized_path(path, npath);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -0300966
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200967 return rc;
968}
969
970/**
971 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
972 *
973 * If it doesn't find the cache entry, then it will get a DFS referral for @path
974 * and create a new entry.
975 *
976 * In case the cache entry exists but expired, it will get a DFS referral
977 * for @path and then update the respective cache entry.
978 *
979 * @xid: syscall id
980 * @ses: smb session
981 * @nls_codepage: charset conversion
982 * @remap: type of character remapping for paths
983 * @path: path to lookup in DFS referral cache.
984 * @it: DFS target iterator
985 *
986 * Return zero if the target hint was updated successfully, otherwise non-zero.
987 */
988int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
989 const struct nls_table *nls_codepage, int remap,
990 const char *path,
991 const struct dfs_cache_tgt_iterator *it)
992{
993 int rc;
994 char *npath;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -0300995 struct cache_entry *ce;
996 struct cache_dfs_tgt *t;
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200997
Paulo Alcantara54be1f62018-11-14 16:01:21 -0200998 rc = get_normalized_path(path, &npath);
999 if (rc)
1000 return rc;
1001
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001002 cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001003
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001004 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
1005 if (rc)
1006 goto out_free_path;
1007
1008 down_write(&htable_rw_lock);
1009
1010 ce = lookup_cache_entry(npath, NULL);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001011 if (IS_ERR(ce)) {
1012 rc = PTR_ERR(ce);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001013 goto out_unlock;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001014 }
1015
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001016 t = ce->tgthint;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001017
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001018 if (likely(!strcasecmp(it->it_name, t->name)))
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001019 goto out_unlock;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001020
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001021 list_for_each_entry(t, &ce->tlist, list) {
1022 if (!strcasecmp(t->name, it->it_name)) {
1023 ce->tgthint = t;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001024 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1025 it->it_name);
1026 break;
1027 }
1028 }
1029
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001030out_unlock:
1031 up_write(&htable_rw_lock);
1032out_free_path:
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001033 free_normalized_path(path, npath);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001034
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001035 return rc;
1036}
1037
1038/**
1039 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1040 * without sending any requests to the currently connected server.
1041 *
1042 * NOTE: This function will neither update a cache entry in case it was
1043 * expired, nor create a new cache entry if @path hasn't been found. It heavily
1044 * relies on an existing cache entry.
1045 *
1046 * @path: path to lookup in DFS referral cache.
1047 * @it: target iterator which contains the target hint to update the cache
1048 * entry with.
1049 *
1050 * Return zero if the target hint was updated successfully, otherwise non-zero.
1051 */
1052int dfs_cache_noreq_update_tgthint(const char *path,
1053 const struct dfs_cache_tgt_iterator *it)
1054{
1055 int rc;
1056 char *npath;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001057 struct cache_entry *ce;
1058 struct cache_dfs_tgt *t;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001059
Paulo Alcantara (SUSE)ff2f7fc2019-12-04 17:38:01 -03001060 if (!it)
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001061 return -EINVAL;
1062
1063 rc = get_normalized_path(path, &npath);
1064 if (rc)
1065 return rc;
1066
1067 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1068
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001069 down_write(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001070
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001071 ce = lookup_cache_entry(npath, NULL);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001072 if (IS_ERR(ce)) {
1073 rc = PTR_ERR(ce);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001074 goto out_unlock;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001075 }
1076
1077 rc = 0;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001078 t = ce->tgthint;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001079
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001080 if (unlikely(!strcasecmp(it->it_name, t->name)))
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001081 goto out_unlock;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001082
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001083 list_for_each_entry(t, &ce->tlist, list) {
1084 if (!strcasecmp(t->name, it->it_name)) {
1085 ce->tgthint = t;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001086 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1087 it->it_name);
1088 break;
1089 }
1090 }
1091
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001092out_unlock:
1093 up_write(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001094 free_normalized_path(path, npath);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001095
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001096 return rc;
1097}
1098
1099/**
1100 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1101 * target iterator (@it).
1102 *
1103 * @path: path to lookup in DFS referral cache.
1104 * @it: DFS target iterator.
1105 * @ref: DFS referral pointer to set up the gathered information.
1106 *
1107 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1108 */
1109int dfs_cache_get_tgt_referral(const char *path,
1110 const struct dfs_cache_tgt_iterator *it,
1111 struct dfs_info3_param *ref)
1112{
1113 int rc;
1114 char *npath;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001115 struct cache_entry *ce;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001116
1117 if (!it || !ref)
1118 return -EINVAL;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001119
1120 rc = get_normalized_path(path, &npath);
1121 if (rc)
1122 return rc;
1123
1124 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1125
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001126 down_read(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001127
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001128 ce = lookup_cache_entry(npath, NULL);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001129 if (IS_ERR(ce)) {
1130 rc = PTR_ERR(ce);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001131 goto out_unlock;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001132 }
1133
1134 cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1135
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001136 rc = setup_referral(path, ce, ref, it->it_name);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001137
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001138out_unlock:
1139 up_read(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001140 free_normalized_path(path, npath);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001141
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001142 return rc;
1143}
1144
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001145/**
Ronnie Sahlberg24e0a1e2020-12-10 00:06:02 -06001146 * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001147 * DFS cache refresh worker.
1148 *
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001149 * @mntdata: mount data.
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001150 * @ctx: cifs context.
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001151 * @fullpath: origin full path.
1152 *
Ronnie Sahlberg24e0a1e2020-12-10 00:06:02 -06001153 * Return zero if context was set up correctly, otherwise non-zero.
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001154 */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001155int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001156{
1157 int rc;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001158 struct vol_info *vi;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001159
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001160 if (!ctx || !fullpath || !mntdata)
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001161 return -EINVAL;
1162
1163 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1164
1165 vi = kzalloc(sizeof(*vi), GFP_KERNEL);
1166 if (!vi)
1167 return -ENOMEM;
1168
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001169 vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
1170 if (!vi->fullpath) {
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001171 rc = -ENOMEM;
1172 goto err_free_vi;
1173 }
1174
Ronnie Sahlberg837e3a12020-11-02 09:36:24 +10001175 rc = smb3_fs_context_dup(&vi->ctx, ctx);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001176 if (rc)
1177 goto err_free_fullpath;
1178
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001179 vi->mntdata = mntdata;
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001180 spin_lock_init(&vi->ctx_lock);
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001181 kref_init(&vi->refcnt);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001182
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001183 spin_lock(&vol_list_lock);
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001184 list_add_tail(&vi->list, &vol_list);
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001185 spin_unlock(&vol_list_lock);
1186
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001187 return 0;
1188
1189err_free_fullpath:
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001190 kfree(vi->fullpath);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001191err_free_vi:
1192 kfree(vi);
1193 return rc;
1194}
1195
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001196/* Must be called with vol_list_lock held */
1197static struct vol_info *find_vol(const char *fullpath)
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001198{
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001199 struct vol_info *vi;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001200
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001201 list_for_each_entry(vi, &vol_list, list) {
1202 cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
1203 if (!strcasecmp(vi->fullpath, fullpath))
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001204 return vi;
1205 }
1206 return ERR_PTR(-ENOENT);
1207}
1208
1209/**
1210 * dfs_cache_update_vol - update vol info in DFS cache after failover
1211 *
1212 * @fullpath: fullpath to look up in volume list.
1213 * @server: TCP ses pointer.
1214 *
1215 * Return zero if volume was updated, otherwise non-zero.
1216 */
1217int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
1218{
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001219 struct vol_info *vi;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001220
1221 if (!fullpath || !server)
1222 return -EINVAL;
1223
1224 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1225
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001226 spin_lock(&vol_list_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001227 vi = find_vol(fullpath);
1228 if (IS_ERR(vi)) {
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001229 spin_unlock(&vol_list_lock);
1230 return PTR_ERR(vi);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001231 }
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001232 kref_get(&vi->refcnt);
1233 spin_unlock(&vol_list_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001234
1235 cifs_dbg(FYI, "%s: updating volume info\n", __func__);
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001236 spin_lock(&vi->ctx_lock);
1237 memcpy(&vi->ctx.dstaddr, &server->dstaddr,
1238 sizeof(vi->ctx.dstaddr));
1239 spin_unlock(&vi->ctx_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001240
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001241 kref_put(&vi->refcnt, vol_release);
1242
1243 return 0;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001244}
1245
1246/**
1247 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
1248 *
1249 * @fullpath: fullpath to look up in volume list.
1250 */
1251void dfs_cache_del_vol(const char *fullpath)
1252{
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001253 struct vol_info *vi;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001254
1255 if (!fullpath || !*fullpath)
1256 return;
1257
1258 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1259
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001260 spin_lock(&vol_list_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001261 vi = find_vol(fullpath);
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001262 spin_unlock(&vol_list_lock);
1263
Tom Rix77b6ec02021-01-05 12:21:26 -08001264 if (!IS_ERR(vi))
1265 kref_put(&vi->refcnt, vol_release);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001266}
1267
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001268/**
1269 * dfs_cache_get_tgt_share - parse a DFS target
1270 *
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001271 * @path: DFS full path
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001272 * @it: DFS target iterator.
1273 * @share: tree name.
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001274 * @prefix: prefix path.
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001275 *
1276 * Return zero if target was parsed correctly, otherwise non-zero.
1277 */
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001278int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
1279 char **share, char **prefix)
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001280{
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001281 char *s, sep, *p;
1282 size_t len;
1283 size_t plen1, plen2;
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001284
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001285 if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001286 return -EINVAL;
1287
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001288 *share = NULL;
1289 *prefix = NULL;
1290
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001291 sep = it->it_name[0];
1292 if (sep != '\\' && sep != '/')
1293 return -EINVAL;
1294
1295 s = strchr(it->it_name + 1, sep);
1296 if (!s)
1297 return -EINVAL;
1298
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001299 /* point to prefix in target node */
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001300 s = strchrnul(s + 1, sep);
1301
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001302 /* extract target share */
1303 *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
1304 if (!*share)
1305 return -ENOMEM;
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001306
Paulo Alcantara7548e1d2020-07-21 09:36:42 -03001307 /* skip separator */
1308 if (*s)
1309 s++;
1310 /* point to prefix in DFS path */
1311 p = path + it->it_path_consumed;
1312 if (*p == sep)
1313 p++;
1314
1315 /* merge prefix paths from DFS path and target node */
1316 plen1 = it->it_name + strlen(it->it_name) - s;
1317 plen2 = path + strlen(path) - p;
1318 if (plen1 || plen2) {
1319 len = plen1 + plen2 + 2;
1320 *prefix = kmalloc(len, GFP_KERNEL);
1321 if (!*prefix) {
1322 kfree(*share);
1323 *share = NULL;
1324 return -ENOMEM;
1325 }
1326 if (plen1)
1327 scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
1328 else
1329 strscpy(*prefix, p, len);
1330 }
Paulo Alcantara (SUSE)bacd7042020-02-20 19:49:34 -03001331 return 0;
1332}
1333
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001334/* Get all tcons that are within a DFS namespace and can be refreshed */
1335static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
1336{
1337 struct cifs_ses *ses;
1338 struct cifs_tcon *tcon;
1339
1340 INIT_LIST_HEAD(head);
1341
1342 spin_lock(&cifs_tcp_ses_lock);
1343 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1344 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1345 if (!tcon->need_reconnect && !tcon->need_reopen_files &&
1346 tcon->dfs_path) {
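				/* pin it; the refresh worker drops this ref via cifs_put_tcon() */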
1347 tcon->tc_count++;
1348 list_add_tail(&tcon->ulist, head);
1349 }
1350 }
1351 if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
1352 ses->tcon_ipc->dfs_path) {
1353 list_add_tail(&ses->tcon_ipc->ulist, head);
1354 }
1355 }
1356 spin_unlock(&cifs_tcp_ses_lock);
1357}
1358
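/*
 * A path with a component beyond the server and share (e.g. \srv\share\dir)
 * is treated as a DFS link; \srv\share alone refers to a DFS root.
 */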
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001359static bool is_dfs_link(const char *path)
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001360{
1361 char *s;
1362
1363 s = strchr(path + 1, '\\');
1364 if (!s)
1365 return false;
1366 return !!strchr(s + 1, '\\');
1367}
1368
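/*
 * Return the \srv\share portion of @path as a newly allocated string (e.g.
 * "\srv\share\dir\file" yields "\srv\share"); the caller must kfree() it.
 */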
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001369static char *get_dfs_root(const char *path)
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001370{
1371 char *s, *npath;
1372
1373 s = strchr(path + 1, '\\');
1374 if (!s)
1375 return ERR_PTR(-EINVAL);
1376
1377 s = strchr(s + 1, '\\');
1378 if (!s)
1379 return ERR_PTR(-EINVAL);
1380
1381 npath = kstrndup(path, s - path, GFP_KERNEL);
1382 if (!npath)
1383 return ERR_PTR(-ENOMEM);
1384
1385 return npath;
1386}
1387
Paulo Alcantara (SUSE)345c1a42019-12-04 17:38:00 -03001388static inline void put_tcp_server(struct TCP_Server_Info *server)
1389{
1390 cifs_put_tcp_session(server, 0);
1391}
1392
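/*
 * Find an already established TCP session matching @ctx and return it with an
 * extra reference held (dropped with put_tcp_server()), or NULL if there is no
 * usable (CifsGood) connection.
 */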
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001393static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
Paulo Alcantara (SUSE)345c1a42019-12-04 17:38:00 -03001394{
1395 struct TCP_Server_Info *server;
1396
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001397 server = cifs_find_tcp_session(ctx);
Paulo Alcantara (SUSE)345c1a42019-12-04 17:38:00 -03001398 if (IS_ERR_OR_NULL(server))
1399 return NULL;
1400
1401 spin_lock(&GlobalMid_Lock);
1402 if (server->tcpStatus != CifsGood) {
1403 spin_unlock(&GlobalMid_Lock);
1404 put_tcp_server(server);
1405 return NULL;
1406 }
1407 spin_unlock(&GlobalMid_Lock);
1408
1409 return server;
1410}
1411
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001412/* Find root SMB session out of a DFS link path */
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001413static struct cifs_ses *find_root_ses(struct vol_info *vi,
1414 struct cifs_tcon *tcon,
1415 const char *path)
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001416{
1417 char *rpath;
1418 int rc;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001419 struct cache_entry *ce;
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001420 struct dfs_info3_param ref = {0};
Ronnie Sahlberg0d4873f2021-01-28 21:35:10 -06001421 char *mdata = NULL, *devname = NULL;
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001422 struct TCP_Server_Info *server;
1423 struct cifs_ses *ses;
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001424 struct smb3_fs_context ctx = {NULL};
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001425
1426 rpath = get_dfs_root(path);
1427 if (IS_ERR(rpath))
1428 return ERR_CAST(rpath);
1429
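	/*
	 * Look up the cached entry for the DFS root, build a referral from it
	 * and turn that into mount options / a device name so that an SMB
	 * session to the root server can be found or created below.
	 */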
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001430 down_read(&htable_rw_lock);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001431
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001432 ce = lookup_cache_entry(rpath, NULL);
1433 if (IS_ERR(ce)) {
1434 up_read(&htable_rw_lock);
1435 ses = ERR_CAST(ce);
1436 goto out;
1437 }
1438
1439 rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001440 if (rc) {
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001441 up_read(&htable_rw_lock);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001442 ses = ERR_PTR(rc);
1443 goto out;
1444 }
1445
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001446 up_read(&htable_rw_lock);
1447
Ronnie Sahlberg0d4873f2021-01-28 21:35:10 -06001448 mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
1449 &devname);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001450 free_dfs_info_param(&ref);
1451
1452 if (IS_ERR(mdata)) {
1453 ses = ERR_CAST(mdata);
1454 mdata = NULL;
1455 goto out;
1456 }
1457
Ronnie Sahlberg0d4873f2021-01-28 21:35:10 -06001458 rc = cifs_setup_volume_info(&ctx, NULL, devname);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001459
1460 if (rc) {
1461 ses = ERR_PTR(rc);
1462 goto out;
1463 }
1464
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001465 server = get_tcp_server(&ctx);
Paulo Alcantara (SUSE)345c1a42019-12-04 17:38:00 -03001466 if (!server) {
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001467 ses = ERR_PTR(-EHOSTDOWN);
1468 goto out;
1469 }
1470
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001471 ses = cifs_get_smb_ses(server, &ctx);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001472
1473out:
Ronnie Sahlbergc741cba2020-12-14 16:40:16 +10001474 smb3_cleanup_fs_context_contents(&ctx);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001475 kfree(mdata);
1476 kfree(rpath);
Ronnie Sahlberg0d4873f2021-01-28 21:35:10 -06001477 kfree(devname);
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001478
1479 return ses;
1480}
1481
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001482/* Refresh DFS cache entry from a given tcon */
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001483static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001484{
1485 int rc = 0;
1486 unsigned int xid;
1487 char *path, *npath;
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001488 struct cache_entry *ce;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001489 struct cifs_ses *root_ses = NULL, *ses;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001490 struct dfs_info3_param *refs = NULL;
1491 int numrefs = 0;
1492
1493 xid = get_xid();
1494
1495 path = tcon->dfs_path + 1;
1496
1497 rc = get_normalized_path(path, &npath);
1498 if (rc)
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001499 goto out_free_xid;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001500
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001501 down_read(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001502
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001503 ce = lookup_cache_entry(npath, NULL);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001504 if (IS_ERR(ce)) {
1505 rc = PTR_ERR(ce);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001506 up_read(&htable_rw_lock);
1507 goto out_free_path;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001508 }
1509
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001510 if (!cache_entry_expired(ce)) {
1511 up_read(&htable_rw_lock);
1512 goto out_free_path;
1513 }
1514
1515 up_read(&htable_rw_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001516
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001517 /* If it's a DFS Link, then use root SMB session for refreshing it */
1518 if (is_dfs_link(npath)) {
1519 ses = root_ses = find_root_ses(vi, tcon, npath);
1520 if (IS_ERR(ses)) {
1521 rc = PTR_ERR(ses);
1522 root_ses = NULL;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001523 goto out_free_path;
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001524 }
1525 } else {
1526 ses = tcon->ses;
1527 }
1528
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001529 rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
1530 &numrefs);
1531 if (!rc) {
1532 dump_refs(refs, numrefs);
1533 rc = update_cache_entry(npath, refs, numrefs);
1534 free_dfs_info_array(refs, numrefs);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001535 }
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001536
Paulo Alcantara (SUSE)50720102019-03-19 16:54:29 -03001537 if (root_ses)
1538 cifs_put_smb_ses(root_ses);
1539
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001540out_free_path:
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001541 free_normalized_path(path, npath);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001542
1543out_free_xid:
1544 free_xid(xid);
1545 return rc;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001546}
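
/*
 * Note that a non-zero return here makes refresh_cache_worker() skip the
 * remaining tcons of the same server, since a failed refresh usually means
 * the connection is being re-established.
 */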
1547
1548/*
1549 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
1550 * referral.
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001551 */
1552static void refresh_cache_worker(struct work_struct *work)
1553{
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001554 struct vol_info *vi, *nvi;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001555 struct TCP_Server_Info *server;
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001556 LIST_HEAD(vols);
1557 LIST_HEAD(tcons);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001558 struct cifs_tcon *tcon, *ntcon;
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001559 int rc;
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001560
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001561 /*
1562 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
1563 * for refreshing.
1564 */
1565 spin_lock(&vol_list_lock);
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001566 list_for_each_entry(vi, &vol_list, list) {
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001567 server = get_tcp_server(&vi->ctx);
Paulo Alcantara (SUSE)345c1a42019-12-04 17:38:00 -03001568 if (!server)
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001569 continue;
Paulo Alcantara (SUSE)345c1a42019-12-04 17:38:00 -03001570
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001571 kref_get(&vi->refcnt);
1572 list_add_tail(&vi->rlist, &vols);
1573 put_tcp_server(server);
1574 }
1575 spin_unlock(&vol_list_lock);
1576
1577 /* Walk through all TCONs and refresh any expired cache entry */
1578 list_for_each_entry_safe(vi, nvi, &vols, rlist) {
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -06001579 spin_lock(&vi->ctx_lock);
1580 server = get_tcp_server(&vi->ctx);
1581 spin_unlock(&vi->ctx_lock);
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001582
1583 if (!server)
1584 goto next_vol;
1585
1586 get_tcons(server, &tcons);
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001587 rc = 0;
1588
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001589 list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
Paulo Alcantara (SUSE)742d8de2019-12-04 17:38:03 -03001590 /*
1591 * Skip tcp server if any of its tcons failed to refresh
1592			 * (possibly due to reconnects).
1593 */
1594 if (!rc)
1595 rc = refresh_tcon(vi, tcon);
1596
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001597 list_del_init(&tcon->ulist);
1598 cifs_put_tcon(tcon);
1599 }
Paulo Alcantara (SUSE)345c1a42019-12-04 17:38:00 -03001600
1601 put_tcp_server(server);
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001602
1603next_vol:
1604 list_del_init(&vi->rlist);
1605 kref_put(&vi->refcnt, vol_release);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001606 }
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001607
1608 spin_lock(&cache_ttl_lock);
Paulo Alcantara (SUSE)185352a2019-12-04 17:37:58 -03001609 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
Paulo Alcantara (SUSE)06d57372019-12-04 17:38:02 -03001610 spin_unlock(&cache_ttl_lock);
Paulo Alcantara54be1f62018-11-14 16:01:21 -02001611}