// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2013
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 */

 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
 /* Note that there are handle based routines which must be */
 /* treated slightly differently for reconnection purposes since we never */
 /* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

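/*
 * Return 1 when traffic on this tcon must be encrypted: the session or
 * share was flagged for encryption, or the mount requested "seal" and the
 * server advertises SMB2_GLOBAL_CAP_ENCRYPTION.
 */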
int smb3_encryption_required(const struct cifs_tcon *tcon)
{
	if (!tcon || !tcon->ses)
		return 0;
	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
		return 1;
	if (tcon->seal &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	return 0;
}

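/*
 * Fill in the fixed 64-byte SMB2 header: protocol id, command, credit
 * request (up to 10 more credits, never exceeding the server limit), the
 * tree and session ids from the tcon, and the signed flag when the
 * connection signs and the tcon is not encrypted.
 */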
static void
smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
		  const struct cifs_tcon *tcon,
		  struct TCP_Server_Info *server)
{
	shdr->ProtocolId = SMB2_PROTO_NUMBER;
	shdr->StructureSize = cpu_to_le16(64);
	shdr->Command = smb2_cmd;
	if (server) {
		spin_lock(&server->req_lock);
		/* Request up to 10 credits but don't go over the limit. */
		if (server->credits >= server->max_credits)
			shdr->CreditRequest = cpu_to_le16(0);
		else
			shdr->CreditRequest = cpu_to_le16(
				min_t(int, server->max_credits -
						server->credits, 10));
		spin_unlock(&server->req_lock);
	} else {
		shdr->CreditRequest = cpu_to_le16(2);
	}
	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		shdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
	/* Uid is not converted */
	if (tcon->ses)
		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to not set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (server && server->sign && !smb3_encryption_required(tcon))
		shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	return;
}

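/*
 * Wait (up to 10 seconds per target) for the demultiplex thread to bring
 * the socket back, then redo negotiate, session setup and tree connect
 * under the session mutex.  Handle-based commands fall through to return
 * -EAGAIN so the caller can reopen its file handle first.
 */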
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server)
{
	int rc;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	int retries;

	/*
	 * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so the
	 * tcp and smb session status checks are done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return 0;

	/*
	 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
	 * cifs_tree_connect().
	 */
	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
		return 0;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server) || !server)
		return -EIO;

	ses = tcon->ses;
	retries = server->nr_targets;

	/*
	 * Give demultiplex thread up to 10 seconds for each target available
	 * for reconnect -- should be greater than cifs socket timeout which is
	 * 7 seconds.
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      10 * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		if (retries && --retries)
			continue;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
		retries = server->nr_targets;
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return 0;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);

	/*
	 * Recheck after acquire mutex. If another thread is negotiating
	 * and the server never sends an answer the socket will be closed
	 * and tcpStatus set to reconnect.
	 */
	if (server->tcpStatus == CifsNeedReconnect) {
		rc = -EHOSTDOWN;
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	/*
	 * If we are reconnecting an extra channel, bind
	 */
	if (CIFS_SERVER_IS_CHAN(server)) {
		ses->binding = true;
		ses->binding_chan = cifs_ses_find_chan(ses, server);
	}

	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect) {
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);
		if ((rc == -EACCES) && !tcon->retry) {
			rc = -EHOSTDOWN;
			ses->binding = false;
			ses->binding_chan = NULL;
			mutex_unlock(&tcon->ses->session_mutex);
			goto failed;
		}
	}
	/*
	 * End of channel binding
	 */
	ses->binding = false;
	ses->binding_chan = NULL;

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	if (tcon->use_persistent)
		tcon->need_reopen_files = true;

	rc = cifs_tree_connect(0, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);

	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc) {
		/* If sess reconnected but tcon didn't, something strange ... */
		pr_warn_once("reconnect tcon failed rc = %d\n", rc);
		goto out;
	}

	if (smb2_command != SMB2_INTERNAL_CMD)
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);

	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		rc = -EAGAIN;
	}
failed:
	unload_nls(nls_codepage);
	return rc;
}

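/*
 * Zero the fixed part of the request buffer, assemble the SMB2 header and
 * set StructureSize2 from the per-command table above; *total_len becomes
 * the header size plus the command's fixed parameter area.
 */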
static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server,
	       void *buf,
	       unsigned int *total_len)
{
	struct smb2_pdu *spdu = (struct smb2_pdu *)buf;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(buf, 0, 256);

	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
	spdu->StructureSize2 = cpu_to_le16(parmsize);

	*total_len = parmsize + sizeof(struct smb2_hdr);
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
				 struct TCP_Server_Info *server,
				 void **request_buf, unsigned int *total_len)
{
	/* BB eventually switch this to SMB2 specific small buf size */
	if (smb2_command == SMB2_SET_INFO)
		*request_buf = cifs_buf_get();
	else
		*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	fill_small_buf(smb2_command, tcon, server,
		       (struct smb2_hdr *)(*request_buf),
		       total_len);

	if (tcon != NULL) {
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return 0;
}

static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	int rc;

	rc = smb2_reconnect(smb2_command, tcon, server);
	if (rc)
		return rc;

	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
				     total_len);
}

static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
					     request_buf, total_len);
	}
	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
				   request_buf, total_len);
}

/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */

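/*
 * Build the SMB3.1.1 preauth integrity context: a single hash algorithm
 * (SHA-512) plus a freshly generated random salt.
 */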
static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
	pneg_ctxt->DataLength =
		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
			  - sizeof(struct smb2_neg_context));
	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}

static unsigned int
build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
{
	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
	unsigned short num_algs = 1; /* number of signing algorithms sent */

	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
	/*
	 * Context Data length must be rounded to multiple of 8 for some servers
	 */
	pneg_ctxt->DataLength = cpu_to_le16(DIV_ROUND_UP(
				sizeof(struct smb2_signing_capabilities) -
				sizeof(struct smb2_neg_context) +
				(num_algs * 2 /* sizeof u16 */), 8) * 8);
	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);

	ctxt_len += 2 /* sizeof le16 */ * num_algs;
	ctxt_len = DIV_ROUND_UP(ctxt_len, 8) * 8;
	return ctxt_len;
	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
}

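/*
 * Advertise the ciphers we accept, most preferred first: AES-128-GCM,
 * then AES-256-GCM and AES-128-CCM.  With require_gcm_256 only
 * AES-256-GCM is offered; without enable_gcm_256 the 256-bit cipher is
 * omitted.
 */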
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	if (require_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
		pneg_ctxt->CipherCount = cpu_to_le16(1);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
	} else if (enable_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(3);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
	} else {
		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(2);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
	}
}

static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
{
	struct nls_table *cp = load_nls_default();

	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;

	/* copy up to max of first 100 bytes of server name to NetName field */
	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
	/* context size is DataLength + minimal smb2_neg_context */
	return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) +
			sizeof(struct smb2_neg_context), 8) * 8;
}

static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	pneg_ctxt->Name[0] = 0x93;
	pneg_ctxt->Name[1] = 0xAD;
	pneg_ctxt->Name[2] = 0x25;
	pneg_ctxt->Name[3] = 0x50;
	pneg_ctxt->Name[4] = 0x9C;
	pneg_ctxt->Name[5] = 0xB4;
	pneg_ctxt->Name[6] = 0x11;
	pneg_ctxt->Name[7] = 0xE7;
	pneg_ctxt->Name[8] = 0xB4;
	pneg_ctxt->Name[9] = 0x23;
	pneg_ctxt->Name[10] = 0x83;
	pneg_ctxt->Name[11] = 0xDE;
	pneg_ctxt->Name[12] = 0x96;
	pneg_ctxt->Name[13] = 0x8B;
	pneg_ctxt->Name[14] = 0xCD;
	pneg_ctxt->Name[15] = 0x7C;
}

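/*
 * Append the SMB3.1.1 negotiate contexts after the fixed negotiate
 * request.  Preauth, encryption, netname and posix contexts are always
 * sent; compression and signing contexts are added only when those
 * features are enabled.  Each context is padded to an 8-byte boundary and
 * NegotiateContextOffset/Count are updated accordingly.
 */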
static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
		      struct TCP_Server_Info *server, unsigned int *total_len)
{
	char *pneg_ctxt;
	unsigned int ctxt_len, neg_context_count;

	if (*total_len > 200) {
		/* In case length corrupted don't want to overrun smb buffer */
		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
		return;
	}

	/*
	 * round up total_len of fixed part of SMB3 negotiate request to 8
	 * byte boundary before adding negotiate contexts
	 */
	*total_len = roundup(*total_len, 8);

	pneg_ctxt = (*total_len) + (char *)req;
	req->NegotiateContextOffset = cpu_to_le32(*total_len);

	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
				      server->hostname);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
	*total_len += sizeof(struct smb2_posix_neg_context);
	pneg_ctxt += sizeof(struct smb2_posix_neg_context);

	neg_context_count = 4;

	if (server->compress_algorithm) {
		build_compression_ctxt((struct smb2_compression_capabilities_context *)
				pneg_ctxt);
		ctxt_len = DIV_ROUND_UP(
			sizeof(struct smb2_compression_capabilities_context),
				8) * 8;
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	if (enable_negotiate_signing) {
		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
				pneg_ctxt);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	/* check for and add transport_capabilities and signing capabilities */
	req->NegotiateContextCount = cpu_to_le16(neg_context_count);

}

static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/* If invalid preauth context warn but use what we requested, SHA-512 */
	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad preauth context\n");
		return;
	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
		pr_warn_once("server sent invalid SaltLength\n");
		return;
	}
	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
		pr_warn_once("Invalid SMB3 hash algorithm count\n");
	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
		pr_warn_once("unknown SMB3 hash algorithm\n");
}

static void decode_compress_ctx(struct TCP_Server_Info *server,
			 struct smb2_compression_capabilities_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/* sizeof compress context is a one element compression capability struct */
	if (len < 10) {
		pr_warn_once("server sent bad compression cntxt\n");
		return;
	}
	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
		pr_warn_once("Invalid SMB3 compress algorithm count\n");
		return;
	}
	if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
		pr_warn_once("unknown compression algorithm\n");
		return;
	}
	server->compress_algorithm = ctxt->CompressionAlgorithms[0];
}

static int decode_encrypt_ctx(struct TCP_Server_Info *server,
			      struct smb2_encryption_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad crypto ctxt len\n");
		return -EINVAL;
	}

	if (le16_to_cpu(ctxt->CipherCount) != 1) {
		pr_warn_once("Invalid SMB3.11 cipher count\n");
		return -EINVAL;
	}
	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
	if (require_gcm_256) {
		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
			return -EOPNOTSUPP;
		}
	} else if (ctxt->Ciphers[0] == 0) {
		/*
		 * e.g. if server only supported AES256_CCM (very unlikely)
		 * or server supported no encryption types or had all disabled.
		 * Since GLOBAL_CAP_ENCRYPTION will be not set, in the case
		 * in which mount requested encryption ("seal") checks later
		 * on during tree connection will return proper rc, but if
		 * seal not requested by client, since server is allowed to
		 * return 0 to indicate no supported cipher, we can't fail here
		 */
		server->cipher_type = 0;
		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
		pr_warn_once("Server does not support requested encryption types\n");
		return 0;
	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
		/* server returned a cipher we didn't ask for */
		pr_warn_once("Invalid SMB3.11 cipher returned\n");
		return -EINVAL;
	}
	server->cipher_type = ctxt->Ciphers[0];
	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
	return 0;
}

static void decode_signing_ctx(struct TCP_Server_Info *server,
			       struct smb2_signing_capabilities *pctxt)
{
	unsigned int len = le16_to_cpu(pctxt->DataLength);

	if ((len < 4) || (len > 16)) {
		pr_warn_once("server sent bad signing negcontext\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
		pr_warn_once("Invalid signing algorithm count\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
		pr_warn_once("unknown signing algorithm\n");
		return;
	}

	server->signing_negotiated = true;
	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
	cifs_dbg(FYI, "signing algorithm %d chosen\n",
		 server->signing_algorithm);
}


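/*
 * Walk the 8-byte aligned negotiate contexts in the server response and
 * dispatch on ContextType; unknown context types are logged and skipped
 * rather than treated as fatal.
 */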
static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
				     struct TCP_Server_Info *server,
				     unsigned int len_of_smb)
{
	struct smb2_neg_context *pctx;
	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
	unsigned int len_of_ctxts, i;
	int rc = 0;

	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
	if (len_of_smb <= offset) {
		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
		return -EINVAL;
	}

	len_of_ctxts = len_of_smb - offset;

	for (i = 0; i < ctxt_cnt; i++) {
		int clen;
		/* check that offset is not beyond end of SMB */
		if (len_of_ctxts == 0)
			break;

		if (len_of_ctxts < sizeof(struct smb2_neg_context))
			break;

		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
		clen = le16_to_cpu(pctx->DataLength);
		if (clen > len_of_ctxts)
			break;

		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
			decode_preauth_context(
				(struct smb2_preauth_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
			rc = decode_encrypt_ctx(server,
				(struct smb2_encryption_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
			decode_compress_ctx(server,
				(struct smb2_compression_capabilities_context *)pctx);
		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
			server->posix_ext_supported = true;
		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
			decode_signing_ctx(server,
				(struct smb2_signing_capabilities *)pctx);
		else
			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
				le16_to_cpu(pctx->ContextType));

		if (rc)
			break;
		/* offsets must be 8 byte aligned */
		clen = (clen + 7) & ~0x7;
		offset += clen + sizeof(struct smb2_neg_context);
		len_of_ctxts -= clen;
	}
	return rc;
}

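/*
 * Allocate an SMB2_CREATE_TAG_POSIX create context carrying the requested
 * mode; the 16-byte Name below is that tag's GUID.
 */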
static struct create_posix *
create_posix_buf(umode_t mode)
{
	struct create_posix *buf;

	buf = kzalloc(sizeof(struct create_posix),
			GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_posix, Mode));
	buf->ccontext.DataLength = cpu_to_le32(4);
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_posix, Name));
	buf->ccontext.NameLength = cpu_to_le16(16);

	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	buf->Name[0] = 0x93;
	buf->Name[1] = 0xAD;
	buf->Name[2] = 0x25;
	buf->Name[3] = 0x50;
	buf->Name[4] = 0x9C;
	buf->Name[5] = 0xB4;
	buf->Name[6] = 0x11;
	buf->Name[7] = 0xE7;
	buf->Name[8] = 0xB4;
	buf->Name[9] = 0x23;
	buf->Name[10] = 0x83;
	buf->Name[11] = 0xDE;
	buf->Name[12] = 0x96;
	buf->Name[13] = 0x8B;
	buf->Name[14] = 0xCD;
	buf->Name[15] = 0x7C;
	buf->Mode = cpu_to_le32(mode);
	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
	return buf;
}

static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_posix_buf(mode);
	if (mode == ACL_NO_MODE)
		cifs_dbg(FYI, "Invalid mode\n");
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_posix);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset = cpu_to_le32(
				sizeof(struct smb2_create_req) +
				iov[num - 1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
	*num_iovec = num + 1;
	return 0;
}


/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

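/*
 * Send the SMB2 NEGOTIATE request.  When any SMB3 dialect is allowed we
 * offer 3.0/3.0.2/3.1.1, the default offers 2.1/3.0/3.0.2/3.1.1, and
 * otherwise only the single configured dialect is sent.  On success the
 * negotiated dialect, security mode, capabilities and transfer sizes are
 * cached in the TCP_Server_Info.
 */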
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb_rqst rqst;
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server = cifs_ses_server(ses);
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;
	unsigned int total_len;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(3);
		total_len += 6;
	} else if (strcmp(server->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(4);
		total_len += 8;
	} else {
		/* otherwise send specific dialect */
		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
		req->DialectCount = cpu_to_le16(1);
		total_len += 2;
	}

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
	if (ses->chan_max > 1)
		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else {
		memcpy(req->ClientGUID, server->client_guid,
			SMB2_CLIENT_GUID_SIZE);
		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(server->vals->version_string,
		     SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(server->vals->version_string,
		     SMBDEFAULT_VERSION_STRING) == 0))
			assemble_neg_contexts(req, server, &total_len);
	}
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);
	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc == -EOPNOTSUPP) {
		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
		goto neg_exit;
	} else if (rc != 0)
		goto neg_exit;

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2.1 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			/* ops set to 3.0 by default for default so update */
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (strcmp(server->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			/* ops set to 3.0 by default for default so update */
			server->ops = &smb21_operations;
			server->vals = &smb21_values;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (le16_to_cpu(rsp->DialectRevision) !=
				server->vals->protocol_id) {
		/* if requested single dialect ensure returned dialect matched */
		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
				le16_to_cpu(rsp->DialectRevision));
		return -EIO;
	}

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
	else {
		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
				le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/*
	 * Keep a copy of the hash after negprot. This hash will be
	 * the starting hash value for all sessions made from this
	 * server.
	 */
	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
	       SMB2_PREAUTH_HASH_SIZE);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
				server->sec_mode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	/*
	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context.
	 * Set the cipher type manually.
	 */
	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       (struct smb2_hdr *)rsp);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0) {
		cifs_dbg(FYI, "missing security blob on negprot\n");
		server->sec_ntlmssp = true;
	}

	rc = cifs_enable_signing(server, ses->sign);
	if (rc)
		goto neg_exit;
	if (blob_length) {
		rc = decode_negTokenInit(security_blob, blob_length, server);
		if (rc == 1)
			rc = 0;
		else if (rc == 0)
			rc = -EIO;
	}

	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
		if (rsp->NegotiateContextCount)
			rc = smb311_decode_neg_context(rsp, server,
						       rsp_iov.iov_len);
		else
			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
	}
neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

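/*
 * Issue FSCTL_VALIDATE_NEGOTIATE_INFO so the server can confirm that the
 * dialect, security mode and capabilities match what was exchanged in the
 * (unsigned) negotiate, guarding against downgrade attacks.  Skipped on
 * SMB3.1.1, where preauth integrity supersedes it, and when the request
 * cannot be signed (guest or null user sessions).
 */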
Steve Frenchff1c0382013-11-19 23:44:46 -06001060int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1061{
Long Li2796d302018-04-25 11:30:04 -07001062 int rc;
1063 struct validate_negotiate_info_req *pneg_inbuf;
David Disseldorpfe83bebc2017-10-20 14:49:37 +02001064 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
Steve Frenchff1c0382013-11-19 23:44:46 -06001065 u32 rsplen;
Steve French9764c022017-09-17 10:41:35 -05001066 u32 inbuflen; /* max of 4 dialects */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001067 struct TCP_Server_Info *server = tcon->ses->server;
Steve Frenchff1c0382013-11-19 23:44:46 -06001068
1069 cifs_dbg(FYI, "validate negotiate\n");
1070
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001071 /* In SMB3.11 preauth integrity supersedes validate negotiate */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001072 if (server->dialect == SMB311_PROT_ID)
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001073 return 0;
1074
Steve Frenchff1c0382013-11-19 23:44:46 -06001075 /*
1076 * validation ioctl must be signed, so no point sending this if we
Steve French0603c962017-09-20 19:57:18 -05001077 * can not sign it (ie are not known user). Even if signing is not
1078 * required (enabled but not negotiated), in those cases we selectively
Steve Frenchff1c0382013-11-19 23:44:46 -06001079 * sign just this, the first and only signed request on a connection.
Steve French0603c962017-09-20 19:57:18 -05001080 * Having validation of negotiate info helps reduce attack vectors.
Steve Frenchff1c0382013-11-19 23:44:46 -06001081 */
Steve French0603c962017-09-20 19:57:18 -05001082 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
Steve Frenchff1c0382013-11-19 23:44:46 -06001083 return 0; /* validation requires signing */
1084
Steve French0603c962017-09-20 19:57:18 -05001085 if (tcon->ses->user_name == NULL) {
1086 cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1087 return 0; /* validation requires signing */
1088 }
1089
1090 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001091 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
Steve French0603c962017-09-20 19:57:18 -05001092
Long Li2796d302018-04-25 11:30:04 -07001093 pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1094 if (!pneg_inbuf)
1095 return -ENOMEM;
1096
1097 pneg_inbuf->Capabilities =
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001098 cpu_to_le32(server->vals->req_capabilities);
Steve French679971e2021-05-07 18:24:11 -05001099 if (tcon->ses->chan_max > 1)
1100 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1101
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001102 memcpy(pneg_inbuf->Guid, server->client_guid,
Sachin Prabhu39552ea2014-05-13 00:48:12 +01001103 SMB2_CLIENT_GUID_SIZE);
Steve Frenchff1c0382013-11-19 23:44:46 -06001104
1105 if (tcon->ses->sign)
Long Li2796d302018-04-25 11:30:04 -07001106 pneg_inbuf->SecurityMode =
Steve Frenchff1c0382013-11-19 23:44:46 -06001107 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1108 else if (global_secflags & CIFSSEC_MAY_SIGN)
Long Li2796d302018-04-25 11:30:04 -07001109 pneg_inbuf->SecurityMode =
Steve Frenchff1c0382013-11-19 23:44:46 -06001110 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1111 else
Long Li2796d302018-04-25 11:30:04 -07001112 pneg_inbuf->SecurityMode = 0;
Steve Frenchff1c0382013-11-19 23:44:46 -06001113
Steve French9764c022017-09-17 10:41:35 -05001114
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001115 if (strcmp(server->vals->version_string,
Steve French9764c022017-09-17 10:41:35 -05001116 SMB3ANY_VERSION_STRING) == 0) {
Long Li2796d302018-04-25 11:30:04 -07001117 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1118 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
Steve French6dffa4c2021-02-02 00:03:58 -06001119 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1120 pneg_inbuf->DialectCount = cpu_to_le16(3);
1121 /* SMB 2.1 not included so subtract one dialect from len */
Long Li2796d302018-04-25 11:30:04 -07001122 inbuflen = sizeof(*pneg_inbuf) -
Steve French6dffa4c2021-02-02 00:03:58 -06001123 (sizeof(pneg_inbuf->Dialects[0]));
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001124 } else if (strcmp(server->vals->version_string,
Steve French9764c022017-09-17 10:41:35 -05001125 SMBDEFAULT_VERSION_STRING) == 0) {
Long Li2796d302018-04-25 11:30:04 -07001126 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1127 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1128 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
Steve Frenchd5c70762019-01-03 02:37:21 -06001129 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1130 pneg_inbuf->DialectCount = cpu_to_le16(4);
Steve French6dffa4c2021-02-02 00:03:58 -06001131 /* structure is big enough for 4 dialects */
Long Li2796d302018-04-25 11:30:04 -07001132 inbuflen = sizeof(*pneg_inbuf);
Steve French9764c022017-09-17 10:41:35 -05001133 } else {
1134 /* otherwise specific dialect was requested */
Long Li2796d302018-04-25 11:30:04 -07001135 pneg_inbuf->Dialects[0] =
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001136 cpu_to_le16(server->vals->protocol_id);
Long Li2796d302018-04-25 11:30:04 -07001137 pneg_inbuf->DialectCount = cpu_to_le16(1);
Steve French9764c022017-09-17 10:41:35 -05001138		/* structure has room for 4 dialects; only 1 is sent (len keeps 2 slots) */
Long Li2796d302018-04-25 11:30:04 -07001139 inbuflen = sizeof(*pneg_inbuf) -
1140 sizeof(pneg_inbuf->Dialects[0]) * 2;
Steve French9764c022017-09-17 10:41:35 -05001141 }
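	/*
	 * Send FSCTL_VALIDATE_NEGOTIATE_INFO and compare the dialect, security
	 * mode and capabilities the server reports against what was negotiated
	 * earlier; any mismatch suggests the negotiate exchange was tampered with.
	 */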
Steve Frenchff1c0382013-11-19 23:44:46 -06001142
1143 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1144 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001145 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1146 (char **)&pneg_rsp, &rsplen);
Namjae Jeon969ae8e2019-01-22 09:46:45 +09001147 if (rc == -EOPNOTSUPP) {
1148 /*
 1150		 * Old Windows versions or NetApp SMB servers can return a
 1151		 * "not supported" error, which the client should accept.
1151 */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001152 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
Colin Ian King21078202019-05-17 09:12:33 +01001153 rc = 0;
1154 goto out_free_inbuf;
Namjae Jeon969ae8e2019-01-22 09:46:45 +09001155 } else if (rc != 0) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001156 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1157 rc);
Long Li2796d302018-04-25 11:30:04 -07001158 rc = -EIO;
1159 goto out_free_inbuf;
Steve Frenchff1c0382013-11-19 23:44:46 -06001160 }
1161
Long Li2796d302018-04-25 11:30:04 -07001162 rc = -EIO;
1163 if (rsplen != sizeof(*pneg_rsp)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001164 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1165 rsplen);
Steve French7db0a6e2017-05-03 21:12:20 -05001166
1167 /* relax check since Mac returns max bufsize allowed on ioctl */
Long Li2796d302018-04-25 11:30:04 -07001168 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
1169 goto out_free_rsp;
Steve Frenchff1c0382013-11-19 23:44:46 -06001170 }
1171
1172 /* check validate negotiate info response matches what we got earlier */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001173 if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
Steve Frenchff1c0382013-11-19 23:44:46 -06001174 goto vneg_out;
1175
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001176 if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
Steve Frenchff1c0382013-11-19 23:44:46 -06001177 goto vneg_out;
1178
1179 /* do not validate server guid because not saved at negprot time yet */
1180
1181 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001182 SMB2_LARGE_FILES) != server->capabilities)
Steve Frenchff1c0382013-11-19 23:44:46 -06001183 goto vneg_out;
1184
1185 /* validate negotiate successful */
Long Li2796d302018-04-25 11:30:04 -07001186 rc = 0;
Steve Frenchff1c0382013-11-19 23:44:46 -06001187 cifs_dbg(FYI, "validate negotiate info successful\n");
Long Li2796d302018-04-25 11:30:04 -07001188 goto out_free_rsp;
Steve Frenchff1c0382013-11-19 23:44:46 -06001189
1190vneg_out:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001191 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
Long Li2796d302018-04-25 11:30:04 -07001192out_free_rsp:
David Disseldorpfe83bebc2017-10-20 14:49:37 +02001193 kfree(pneg_rsp);
Long Li2796d302018-04-25 11:30:04 -07001194out_free_inbuf:
1195 kfree(pneg_inbuf);
1196 return rc;
Steve Frenchff1c0382013-11-19 23:44:46 -06001197}
1198
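/*
 * Map the requested authentication type onto what this connection can use:
 * Kerberos and raw NTLMSSP pass straight through, NTLMv2 is carried inside
 * raw NTLMSSP, and Unspecified falls back to whichever mechanism the server
 * advertised that global_secflags also permits.
 */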
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301199enum securityEnum
1200smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1201{
1202 switch (requested) {
1203 case Kerberos:
1204 case RawNTLMSSP:
1205 return requested;
1206 case NTLMv2:
1207 return RawNTLMSSP;
1208 case Unspecified:
1209 if (server->sec_ntlmssp &&
1210 (global_secflags & CIFSSEC_MAY_NTLMSSP))
1211 return RawNTLMSSP;
1212 if ((server->sec_kerberos || server->sec_mskerberos) &&
1213 (global_secflags & CIFSSEC_MAY_KRB5))
1214 return Kerberos;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001215 fallthrough;
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301216 default:
1217 return Unspecified;
1218 }
1219}
1220
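/*
 * Per-call state for SMB2/SMB3 session setup.  SMB2_select_sec() picks the
 * first handler (Kerberos or raw NTLMSSP) and stores it in ->func; the loop
 * in SMB2_sess_setup() keeps invoking ->func until a handler clears it, so a
 * multi-round exchange (NTLMSSP negotiate then authenticate) is simply a
 * chain of handlers.
 */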
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001221struct SMB2_sess_data {
1222 unsigned int xid;
1223 struct cifs_ses *ses;
1224 struct nls_table *nls_cp;
1225 void (*func)(struct SMB2_sess_data *);
1226 int result;
1227 u64 previous_session;
1228
1229 /* we will send the SMB in three pieces:
1230 * a fixed length beginning part, an optional
1231 * SPNEGO blob (which can be zero length), and a
1232 * last part which will include the strings
1233 * and rest of bcc area. This allows us to avoid
 1234	 * a large 17K buffer allocation
1235 */
1236 int buf0_type;
1237 struct kvec iov[2];
1238};
1239
1240static int
1241SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1242{
1243 int rc;
1244 struct cifs_ses *ses = sess_data->ses;
1245 struct smb2_sess_setup_req *req;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001246 struct TCP_Server_Info *server = cifs_ses_server(ses);
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001247 unsigned int total_len;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001248
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001249 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1250 (void **) &req,
1251 &total_len);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001252 if (rc)
1253 return rc;
1254
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001255 if (sess_data->ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001256 req->hdr.SessionId = cpu_to_le64(sess_data->ses->Suid);
1257 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001258 req->PreviousSessionId = 0;
1259 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1260 } else {
1261 /* First session, not a reauthenticate */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001262 req->hdr.SessionId = 0;
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001263 /*
1264 * if reconnect, we need to send previous sess id
1265 * otherwise it is 0
1266 */
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10001267 req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001268 req->Flags = 0; /* MBZ */
1269 }
Steve Frenchd4090142018-06-13 17:05:58 -05001270
 1271	/* enough to enable echoes and oplocks and one max size write */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001272 req->hdr.CreditRequest = cpu_to_le16(130);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001273
1274 /* only one of SMB2 signing flags may be set in SMB2 request */
1275 if (server->sign)
1276 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1277 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1278 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1279 else
1280 req->SecurityMode = 0;
1281
Steve French8d330962019-07-25 18:13:10 -05001282#ifdef CONFIG_CIFS_DFS_UPCALL
1283 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1284#else
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001285 req->Capabilities = 0;
Steve French8d330962019-07-25 18:13:10 -05001286#endif /* CONFIG_CIFS_DFS_UPCALL */
1287
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001288 req->Channel = 0; /* MBZ */
1289
1290 sess_data->iov[0].iov_base = (char *)req;
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001291 /* 1 for pad */
1292 sess_data->iov[0].iov_len = total_len - 1;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001293 /*
1294 * This variable will be used to clear the buffer
1295 * allocated above in case of any error in the calling function.
1296 */
1297 sess_data->buf0_type = CIFS_SMALL_BUFFER;
1298
1299 return 0;
1300}
1301
1302static void
1303SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1304{
1305 free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
1306 sess_data->buf0_type = CIFS_NO_BUFFER;
1307}
1308
1309static int
1310SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1311{
1312 int rc;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001313 struct smb_rqst rqst;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001314 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001315 struct kvec rsp_iov = { NULL, 0 };
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001316
1317 /* Testing shows that buffer offset must be at location of Buffer[0] */
1318 req->SecurityBufferOffset =
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001319 cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001320 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1321
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001322 memset(&rqst, 0, sizeof(struct smb_rqst));
1323 rqst.rq_iov = sess_data->iov;
1324 rqst.rq_nvec = 2;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001325
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001326 /* BB add code to build os and lm fields */
1327 rc = cifs_send_recv(sess_data->xid, sess_data->ses,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001328 cifs_ses_server(sess_data->ses),
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001329 &rqst,
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001330 &sess_data->buf0_type,
Shyam Prasad N0f56db82021-02-03 22:49:52 -08001331 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001332 cifs_small_buf_release(sess_data->iov[0].iov_base);
1333 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001334
1335 return rc;
1336}
1337
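/*
 * Common tail of a successful session setup round: derive the signing key
 * when the dialect has one, set the initial sequence number the first time a
 * session is established on this socket, and (unless this is a channel
 * binding) mark the session usable.
 */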
1338static int
1339SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1340{
1341 int rc = 0;
1342 struct cifs_ses *ses = sess_data->ses;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001343 struct TCP_Server_Info *server = cifs_ses_server(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001344
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001345 mutex_lock(&server->srv_mutex);
1346 if (server->ops->generate_signingkey) {
1347 rc = server->ops->generate_signingkey(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001348 if (rc) {
1349 cifs_dbg(FYI,
1350 "SMB3 session key generation failed\n");
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001351 mutex_unlock(&server->srv_mutex);
Pavel Shilovskycabfb362016-11-07 18:20:50 -08001352 return rc;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001353 }
1354 }
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001355 if (!server->session_estab) {
1356 server->sequence_number = 0x2;
1357 server->session_estab = true;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001358 }
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001359 mutex_unlock(&server->srv_mutex);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001360
1361 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001362 /* keep existing ses state if binding */
1363 if (!ses->binding) {
1364 spin_lock(&GlobalMid_Lock);
1365 ses->status = CifsGood;
1366 ses->need_reconnect = false;
1367 spin_unlock(&GlobalMid_Lock);
1368 }
1369
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001370 return rc;
1371}
1372
1373#ifdef CONFIG_CIFS_UPCALL
1374static void
1375SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1376{
1377 int rc;
1378 struct cifs_ses *ses = sess_data->ses;
1379 struct cifs_spnego_msg *msg;
1380 struct key *spnego_key = NULL;
1381 struct smb2_sess_setup_rsp *rsp = NULL;
1382
1383 rc = SMB2_sess_alloc_buffer(sess_data);
1384 if (rc)
1385 goto out;
1386
1387 spnego_key = cifs_get_spnego_key(ses);
1388 if (IS_ERR(spnego_key)) {
1389 rc = PTR_ERR(spnego_key);
Steve French0a018942020-07-16 00:34:21 -05001390 if (rc == -ENOKEY)
1391 cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001392 spnego_key = NULL;
1393 goto out;
1394 }
1395
1396 msg = spnego_key->payload.data[0];
1397 /*
1398 * check version field to make sure that cifs.upcall is
1399 * sending us a response in an expected form
1400 */
1401 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001402 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1403 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001404 rc = -EKEYREJECTED;
1405 goto out_put_spnego_key;
1406 }
1407
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001408 /* keep session key if binding */
1409 if (!ses->binding) {
1410 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1411 GFP_KERNEL);
1412 if (!ses->auth_key.response) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001413 cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001414 msg->sesskey_len);
1415 rc = -ENOMEM;
1416 goto out_put_spnego_key;
1417 }
1418 ses->auth_key.len = msg->sesskey_len;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001419 }
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001420
1421 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1422 sess_data->iov[1].iov_len = msg->secblob_len;
1423
1424 rc = SMB2_sess_sendreceive(sess_data);
1425 if (rc)
1426 goto out_put_spnego_key;
1427
1428 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001429 /* keep session id and flags if binding */
1430 if (!ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001431 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001432 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1433 }
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001434
1435 rc = SMB2_sess_establish_session(sess_data);
1436out_put_spnego_key:
1437 key_invalidate(spnego_key);
1438 key_put(spnego_key);
1439out:
1440 sess_data->result = rc;
1441 sess_data->func = NULL;
1442 SMB2_sess_free_buffer(sess_data);
1443}
1444#else
1445static void
1446SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1447{
1448 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1449 sess_data->result = -EOPNOTSUPP;
1450 sess_data->func = NULL;
1451}
1452#endif
1453
Sachin Prabhu166cea42016-10-07 19:11:22 +01001454static void
1455SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
1456
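/*
 * First round of raw NTLMSSP: send the NEGOTIATE blob and expect the server
 * to reply with STATUS_MORE_PROCESSING_REQUIRED plus a CHALLENGE blob.  On
 * success the handler chain continues with the authenticate step below.
 */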
1457static void
1458SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1459{
1460 int rc;
1461 struct cifs_ses *ses = sess_data->ses;
1462 struct smb2_sess_setup_rsp *rsp = NULL;
Shyam Prasad N49bd49f2021-11-05 19:03:57 +00001463 unsigned char *ntlmssp_blob = NULL;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001464 bool use_spnego = false; /* else use raw ntlmssp */
1465 u16 blob_length = 0;
1466
1467 /*
1468 * If memory allocation is successful, caller of this function
1469 * frees it.
1470 */
1471 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1472 if (!ses->ntlmssp) {
1473 rc = -ENOMEM;
1474 goto out_err;
1475 }
1476 ses->ntlmssp->sesskey_per_smbsess = true;
1477
1478 rc = SMB2_sess_alloc_buffer(sess_data);
1479 if (rc)
1480 goto out_err;
1481
Shyam Prasad N49bd49f2021-11-05 19:03:57 +00001482 rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob,
1483 &blob_length, ses,
1484 sess_data->nls_cp);
1485 if (rc)
1486 goto out_err;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001487
Sachin Prabhu166cea42016-10-07 19:11:22 +01001488 if (use_spnego) {
1489 /* BB eventually need to add this */
1490 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1491 rc = -EOPNOTSUPP;
1492 goto out;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001493 }
1494 sess_data->iov[1].iov_base = ntlmssp_blob;
1495 sess_data->iov[1].iov_len = blob_length;
1496
1497 rc = SMB2_sess_sendreceive(sess_data);
1498 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1499
1500 /* If true, rc here is expected and not an error */
1501 if (sess_data->buf0_type != CIFS_NO_BUFFER &&
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001502 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
Sachin Prabhu166cea42016-10-07 19:11:22 +01001503 rc = 0;
1504
1505 if (rc)
1506 goto out;
1507
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10001508 if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
Sachin Prabhu166cea42016-10-07 19:11:22 +01001509 le16_to_cpu(rsp->SecurityBufferOffset)) {
1510 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
1511 le16_to_cpu(rsp->SecurityBufferOffset));
1512 rc = -EIO;
1513 goto out;
1514 }
1515 rc = decode_ntlmssp_challenge(rsp->Buffer,
1516 le16_to_cpu(rsp->SecurityBufferLength), ses);
1517 if (rc)
1518 goto out;
1519
1520 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1521
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001522 /* keep existing ses id and flags if binding */
1523 if (!ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001524 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001525 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1526 }
Sachin Prabhu166cea42016-10-07 19:11:22 +01001527
1528out:
1529 kfree(ntlmssp_blob);
1530 SMB2_sess_free_buffer(sess_data);
1531 if (!rc) {
1532 sess_data->result = 0;
1533 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1534 return;
1535 }
1536out_err:
1537 kfree(ses->ntlmssp);
1538 ses->ntlmssp = NULL;
1539 sess_data->result = rc;
1540 sess_data->func = NULL;
1541}
1542
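/*
 * Second round of raw NTLMSSP: reuse the session id returned with the
 * challenge, send the AUTHENTICATE blob, and on success finish establishing
 * the session (signing key, session state).
 */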
1543static void
1544SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1545{
1546 int rc;
1547 struct cifs_ses *ses = sess_data->ses;
1548 struct smb2_sess_setup_req *req;
1549 struct smb2_sess_setup_rsp *rsp = NULL;
1550 unsigned char *ntlmssp_blob = NULL;
1551 bool use_spnego = false; /* else use raw ntlmssp */
1552 u16 blob_length = 0;
1553
1554 rc = SMB2_sess_alloc_buffer(sess_data);
1555 if (rc)
1556 goto out;
1557
1558 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001559 req->hdr.SessionId = cpu_to_le64(ses->Suid);
Sachin Prabhu166cea42016-10-07 19:11:22 +01001560
1561 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
1562 sess_data->nls_cp);
1563 if (rc) {
1564 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1565 goto out;
1566 }
1567
1568 if (use_spnego) {
1569 /* BB eventually need to add this */
1570 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1571 rc = -EOPNOTSUPP;
1572 goto out;
1573 }
1574 sess_data->iov[1].iov_base = ntlmssp_blob;
1575 sess_data->iov[1].iov_len = blob_length;
1576
1577 rc = SMB2_sess_sendreceive(sess_data);
1578 if (rc)
1579 goto out;
1580
1581 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1582
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001583 /* keep existing ses id and flags if binding */
1584 if (!ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001585 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001586 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1587 }
Sachin Prabhu166cea42016-10-07 19:11:22 +01001588
1589 rc = SMB2_sess_establish_session(sess_data);
Ronnie Sahlbergf560cda2020-04-12 16:09:26 +10001590#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1591 if (ses->server->dialect < SMB30_PROT_ID) {
1592 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1593 /*
1594 * The session id is opaque in terms of endianness, so we can't
 1595		 * print it as a long long, so we dump it as we got it on the wire
1596 */
1597 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
1598 &ses->Suid);
1599 cifs_dbg(VFS, "Session Key %*ph\n",
1600 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1601 cifs_dbg(VFS, "Signing Key %*ph\n",
1602 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1603 }
1604#endif
Sachin Prabhu166cea42016-10-07 19:11:22 +01001605out:
1606 kfree(ntlmssp_blob);
1607 SMB2_sess_free_buffer(sess_data);
1608 kfree(ses->ntlmssp);
1609 ses->ntlmssp = NULL;
1610 sess_data->result = rc;
1611 sess_data->func = NULL;
1612}
1613
1614static int
1615SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
1616{
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301617 int type;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001618
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001619 type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301620 cifs_dbg(FYI, "sess setup type %d\n", type);
1621 if (type == Unspecified) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001622 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301623 return -EINVAL;
1624 }
1625
1626 switch (type) {
Sachin Prabhu166cea42016-10-07 19:11:22 +01001627 case Kerberos:
1628 sess_data->func = SMB2_auth_kerberos;
1629 break;
1630 case RawNTLMSSP:
1631 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1632 break;
1633 default:
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301634 cifs_dbg(VFS, "secType %d not supported!\n", type);
Sachin Prabhu166cea42016-10-07 19:11:22 +01001635 return -EOPNOTSUPP;
1636 }
1637
1638 return 0;
1639}
1640
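/*
 * Entry point for SMB2/SMB3 session setup.  A caller (illustrative sketch
 * only) would do something like:
 *
 *	rc = SMB2_sess_setup(xid, ses, nls_cp);
 *	if (rc)
 *		cifs_dbg(FYI, "session setup failed %d\n", rc);
 *
 * The preauth hash is copied from the server before the handler loop runs,
 * presumably so SMB3.1.1 preauth integrity covers the whole exchange.
 */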
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001641int
1642SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1643 const struct nls_table *nls_cp)
1644{
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001645 int rc = 0;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001646 struct TCP_Server_Info *server = cifs_ses_server(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001647 struct SMB2_sess_data *sess_data;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001648
Joe Perchesf96637b2013-05-04 22:12:25 -05001649 cifs_dbg(FYI, "Session Setup\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001650
Jeff Layton3534b852013-05-24 07:41:01 -04001651 if (!server) {
1652 WARN(1, "%s: server is NULL!\n", __func__);
1653 return -EIO;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001654 }
1655
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001656 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1657 if (!sess_data)
1658 return -ENOMEM;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001659
1660 rc = SMB2_select_sec(ses, sess_data);
1661 if (rc)
1662 goto out;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001663 sess_data->xid = xid;
1664 sess_data->ses = ses;
1665 sess_data->buf0_type = CIFS_NO_BUFFER;
1666 sess_data->nls_cp = (struct nls_table *) nls_cp;
Steve Frenchb2adf22f2018-05-31 15:19:25 -05001667 sess_data->previous_session = ses->Suid;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001668
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001669 /*
1670 * Initialize the session hash with the server one.
1671 */
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001672 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001673 SMB2_PREAUTH_HASH_SIZE);
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001674
Sachin Prabhu166cea42016-10-07 19:11:22 +01001675 while (sess_data->func)
1676 sess_data->func(sess_data);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001677
Steve Frenchc721c382017-09-19 18:40:03 -05001678 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001679 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001680 rc = sess_data->result;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001681out:
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001682 kfree(sess_data);
1683 return rc;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001684}
1685
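/*
 * Log off a session.  Nothing is sent if the session is already marked for
 * reconnect; otherwise the single-iov request is encrypted or signed
 * according to the session flags and no response buffer is expected.
 */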
1686int
1687SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
1688{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001689 struct smb_rqst rqst;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001690 struct smb2_logoff_req *req; /* response is also trivial struct */
1691 int rc = 0;
1692 struct TCP_Server_Info *server;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001693 int flags = 0;
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001694 unsigned int total_len;
1695 struct kvec iov[1];
1696 struct kvec rsp_iov;
1697 int resp_buf_type;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001698
Joe Perchesf96637b2013-05-04 22:12:25 -05001699 cifs_dbg(FYI, "disconnect session %p\n", ses);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001700
1701 if (ses && (ses->server))
1702 server = ses->server;
1703 else
1704 return -EIO;
1705
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -05001706 /* no need to send SMB logoff if uid already closed due to reconnect */
1707 if (ses->need_reconnect)
1708 goto smb2_session_already_dead;
1709
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001710 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
1711 (void **) &req, &total_len);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001712 if (rc)
1713 return rc;
1714
 1715	/* since there is no tcon, smb2_init cannot do this, so do it here */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001716 req->hdr.SessionId = cpu_to_le64(ses->Suid);
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001717
1718 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
1719 flags |= CIFS_TRANSFORM_REQ;
1720 else if (server->sign)
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001721 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001722
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10001723 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001724
1725 iov[0].iov_base = (char *)req;
1726 iov[0].iov_len = total_len;
1727
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001728 memset(&rqst, 0, sizeof(struct smb_rqst));
1729 rqst.rq_iov = iov;
1730 rqst.rq_nvec = 1;
1731
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001732 rc = cifs_send_recv(xid, ses, ses->server,
1733 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001734 cifs_small_buf_release(req);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001735 /*
1736 * No tcon so can't do
1737 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1738 */
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -05001739
1740smb2_session_already_dead:
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001741 return rc;
1742}
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001743
1744static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
1745{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001746 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001747}
1748
1749#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
1750
Steve Frenchde9f68df2013-11-15 11:26:24 -06001751/* These are similar values to what Windows uses */
1752static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
1753{
1754 tcon->max_chunks = 256;
1755 tcon->max_bytes_chunk = 1048576;
1756 tcon->max_bytes_copy = 16777216;
1757}
1758
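/*
 * Connect to a share.  The UNC path ("\\server\share") is converted to
 * UTF-16 and carried after the fixed part of the TREE_CONNECT request; on
 * SMB3.1.1 the request is signed when encryption is not in use and the
 * session is neither guest nor anonymous.  A hypothetical caller:
 *
 *	rc = SMB2_tcon(xid, ses, "\\\\srv\\share", tcon, nls_cp);
 */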
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001759int
1760SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1761 struct cifs_tcon *tcon, const struct nls_table *cp)
1762{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001763 struct smb_rqst rqst;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001764 struct smb2_tree_connect_req *req;
1765 struct smb2_tree_connect_rsp *rsp = NULL;
1766 struct kvec iov[2];
Aurélien Apteldb3b5472017-10-11 13:23:36 +02001767 struct kvec rsp_iov = { NULL, 0 };
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001768 int rc = 0;
1769 int resp_buftype;
1770 int unc_path_len;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001771 __le16 *unc_path = NULL;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001772 int flags = 0;
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001773 unsigned int total_len;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001774 struct TCP_Server_Info *server;
1775
1776 /* always use master channel */
1777 server = ses->server;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001778
Joe Perchesf96637b2013-05-04 22:12:25 -05001779 cifs_dbg(FYI, "TCON\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001780
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001781 if (!server || !tree)
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001782 return -EIO;
1783
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001784 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
1785 if (unc_path == NULL)
1786 return -ENOMEM;
1787
1788 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
1789 unc_path_len *= 2;
1790 if (unc_path_len < 2) {
1791 kfree(unc_path);
1792 return -EINVAL;
1793 }
1794
Jan-Marek Glogowski806a28e2017-02-20 12:25:58 +01001795 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
Aurelien Aptelb327a712018-01-24 13:46:10 +01001796 tcon->tid = 0;
Steve Frenchfae80442018-10-19 17:14:32 -05001797 atomic_set(&tcon->num_remote_opens, 0);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001798 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
1799 (void **) &req, &total_len);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001800 if (rc) {
1801 kfree(unc_path);
1802 return rc;
1803 }
1804
Steve French5a77e752018-05-09 17:43:08 -05001805 if (smb3_encryption_required(tcon))
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001806 flags |= CIFS_TRANSFORM_REQ;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001807
1808 iov[0].iov_base = (char *)req;
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001809 /* 1 for pad */
1810 iov[0].iov_len = total_len - 1;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001811
1812 /* Testing shows that buffer offset must be at location of Buffer[0] */
1813 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001814 - 1 /* pad */);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001815 req->PathLength = cpu_to_le16(unc_path_len - 2);
1816 iov[1].iov_base = unc_path;
1817 iov[1].iov_len = unc_path_len;
1818
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001819 /*
 1820	 * An SMB3.1.1 tcon request must be signed if not encrypted (MS-SMB2
 1821	 * 3.2.4.1.1), unless the user is guest or anonymous (MS-SMB2 3.2.5.3.1).
Steve French8c11a602019-03-22 22:31:17 -05001822 * (Samba servers don't always set the flag so also check if null user)
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001823 */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001824 if ((server->dialect == SMB311_PROT_ID) &&
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001825 !smb3_encryption_required(tcon) &&
Steve French8c11a602019-03-22 22:31:17 -05001826 !(ses->session_flags &
1827 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
1828 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001829 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Steve French6188f282018-03-13 02:29:36 -05001830
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001831 memset(&rqst, 0, sizeof(struct smb_rqst));
1832 rqst.rq_iov = iov;
1833 rqst.rq_nvec = 2;
1834
Steve French4fe75c42019-02-14 01:19:02 -06001835 /* Need 64 for max size write so ask for more in case not there yet */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001836 req->hdr.CreditRequest = cpu_to_le16(64);
Steve French4fe75c42019-02-14 01:19:02 -06001837
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001838 rc = cifs_send_recv(xid, ses, server,
1839 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001840 cifs_small_buf_release(req);
1841 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
Steve Frenchf8af49d2018-10-28 00:47:11 -05001842 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
Steve Frenchbac35392021-11-11 16:18:14 -06001843 if ((rc != 0) || (rsp == NULL)) {
Steve French35591342021-06-19 12:01:37 -05001844 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
1845 tcon->need_reconnect = true;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001846 goto tcon_error_exit;
1847 }
1848
Christophe JAILLETcd123002017-05-12 17:59:32 +02001849 switch (rsp->ShareType) {
1850 case SMB2_SHARE_TYPE_DISK:
Joe Perchesf96637b2013-05-04 22:12:25 -05001851 cifs_dbg(FYI, "connection to disk share\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001852 break;
1853 case SMB2_SHARE_TYPE_PIPE:
Aurelien Aptelb327a712018-01-24 13:46:10 +01001854 tcon->pipe = true;
Joe Perchesf96637b2013-05-04 22:12:25 -05001855 cifs_dbg(FYI, "connection to pipe share\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001856 break;
1857 case SMB2_SHARE_TYPE_PRINT:
Aurelien Aptelb327a712018-01-24 13:46:10 +01001858 tcon->print = true;
Joe Perchesf96637b2013-05-04 22:12:25 -05001859 cifs_dbg(FYI, "connection to printer\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001860 break;
1861 default:
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001862 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001863 rc = -EOPNOTSUPP;
1864 goto tcon_error_exit;
1865 }
1866
1867 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
Steve French769ee6a2013-06-19 14:15:30 -05001868 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001869 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
1870 tcon->tidStatus = CifsGood;
1871 tcon->need_reconnect = false;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001872 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
Zhao Hongjiang46b51d02013-06-24 01:57:47 -05001873 strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001874
1875 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
1876 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001877 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001878
1879 if (tcon->seal &&
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001880 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001881 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001882
Steve Frenchde9f68df2013-11-15 11:26:24 -06001883 init_copy_chunk_defaults(tcon);
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001884 if (server->ops->validate_negotiate)
1885 rc = server->ops->validate_negotiate(xid, tcon);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001886tcon_exit:
Steve Frenchf8af49d2018-10-28 00:47:11 -05001887
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001888 free_rsp_buf(resp_buftype, rsp);
1889 kfree(unc_path);
1890 return rc;
1891
1892tcon_error_exit:
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001893 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001894 cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001895 goto tcon_exit;
1896}
1897
1898int
1899SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
1900{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001901 struct smb_rqst rqst;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001902 struct smb2_tree_disconnect_req *req; /* response is trivial */
1903 int rc = 0;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001904 struct cifs_ses *ses = tcon->ses;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001905 int flags = 0;
Ronnie Sahlberg4eecf4c2017-11-09 12:14:18 +11001906 unsigned int total_len;
1907 struct kvec iov[1];
1908 struct kvec rsp_iov;
1909 int resp_buf_type;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001910
Joe Perchesf96637b2013-05-04 22:12:25 -05001911 cifs_dbg(FYI, "Tree Disconnect\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001912
Christos Gkekas68a6afa2017-07-09 11:45:04 +01001913 if (!ses || !(ses->server))
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001914 return -EIO;
1915
1916 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
1917 return 0;
1918
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +10001919 close_cached_dir_lease(&tcon->crfid);
Ronnie Sahlberg72e73c72019-11-07 17:00:38 +10001920
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001921 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
1922 (void **) &req,
1923 &total_len);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001924 if (rc)
1925 return rc;
1926
Steve French5a77e752018-05-09 17:43:08 -05001927 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001928 flags |= CIFS_TRANSFORM_REQ;
1929
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10001930 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg4eecf4c2017-11-09 12:14:18 +11001931
1932 iov[0].iov_base = (char *)req;
1933 iov[0].iov_len = total_len;
1934
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001935 memset(&rqst, 0, sizeof(struct smb_rqst));
1936 rqst.rq_iov = iov;
1937 rqst.rq_nvec = 1;
1938
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001939 rc = cifs_send_recv(xid, ses, ses->server,
1940 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001941 cifs_small_buf_release(req);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001942 if (rc)
1943 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
1944
1945 return rc;
1946}
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001947
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001948
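/*
 * The helpers below build the create contexts appended to SMB2_CREATE:
 * durable handle request ("DHnQ") and reconnect ("DHnC"), their v2 variants
 * ("DH2Q"/"DH2C"), leases, timewarp tokens and security descriptors.  Each
 * fills in a struct create_context header (name/data offsets and lengths)
 * in front of its payload.
 */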
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001949static struct create_durable *
1950create_durable_buf(void)
1951{
1952 struct create_durable *buf;
1953
1954 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1955 if (!buf)
1956 return NULL;
1957
1958 buf->ccontext.DataOffset = cpu_to_le16(offsetof
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001959 (struct create_durable, Data));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001960 buf->ccontext.DataLength = cpu_to_le32(16);
1961 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1962 (struct create_durable, Name));
1963 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07001964 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001965 buf->Name[0] = 'D';
1966 buf->Name[1] = 'H';
1967 buf->Name[2] = 'n';
1968 buf->Name[3] = 'Q';
1969 return buf;
1970}
1971
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001972static struct create_durable *
1973create_reconnect_durable_buf(struct cifs_fid *fid)
1974{
1975 struct create_durable *buf;
1976
1977 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1978 if (!buf)
1979 return NULL;
1980
1981 buf->ccontext.DataOffset = cpu_to_le16(offsetof
1982 (struct create_durable, Data));
1983 buf->ccontext.DataLength = cpu_to_le32(16);
1984 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1985 (struct create_durable, Name));
1986 buf->ccontext.NameLength = cpu_to_le16(4);
1987 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
1988 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
Steve French12197a72014-05-14 05:29:40 -07001989 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001990 buf->Name[0] = 'D';
1991 buf->Name[1] = 'H';
1992 buf->Name[2] = 'n';
1993 buf->Name[3] = 'C';
1994 return buf;
1995}
1996
Steve French89a5bfa2019-07-18 17:22:18 -05001997static void
1998parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
1999{
2000 struct create_on_disk_id *pdisk_id = (struct create_on_disk_id *)cc;
2001
2002 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
2003 pdisk_id->DiskFileId, pdisk_id->VolumeId);
2004 buf->IndexNumber = pdisk_id->DiskFileId;
2005}
2006
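/*
 * The SMB3.1.1 POSIX create context payload starts with three __le32 fields
 * (nlink, reparse tag, mode) followed by variable length owner and group
 * SIDs, which is the layout walked below.
 */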
Steve Frenchab3459d2020-02-06 17:31:56 -06002007static void
Aurelien Aptel69dda302020-03-02 17:53:22 +01002008parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
2009 struct create_posix_rsp *posix)
Steve Frenchab3459d2020-02-06 17:31:56 -06002010{
Aurelien Aptel69dda302020-03-02 17:53:22 +01002011 int sid_len;
2012 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
2013 u8 *end = beg + le32_to_cpu(cc->DataLength);
2014 u8 *sid;
Steve Frenchab3459d2020-02-06 17:31:56 -06002015
Aurelien Aptel69dda302020-03-02 17:53:22 +01002016 memset(posix, 0, sizeof(*posix));
Aurelien Aptel2e8af972020-02-08 15:50:56 +01002017
Aurelien Aptel69dda302020-03-02 17:53:22 +01002018 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
2019 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
2020 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
2021
2022 sid = beg + 12;
2023 sid_len = posix_info_sid_size(sid, end);
2024 if (sid_len < 0) {
2025 cifs_dbg(VFS, "bad owner sid in posix create response\n");
2026 return;
2027 }
2028 memcpy(&posix->owner, sid, sid_len);
2029
2030 sid = sid + sid_len;
2031 sid_len = posix_info_sid_size(sid, end);
2032 if (sid_len < 0) {
2033 cifs_dbg(VFS, "bad group sid in posix create response\n");
2034 return;
2035 }
2036 memcpy(&posix->group, sid, sid_len);
2037
2038 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
2039 posix->nlink, posix->mode, posix->reparse_tag);
Steve Frenchab3459d2020-02-06 17:31:56 -06002040}
2041
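/*
 * Walk the create contexts returned in an SMB2_CREATE response and pull out
 * the ones we understand: lease state, the on-disk file id, and the POSIX
 * context (matched by its 16 byte tag).  Unknown contexts are skipped by
 * following the Next links.
 */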
Steve French89a5bfa2019-07-18 17:22:18 -05002042void
2043smb2_parse_contexts(struct TCP_Server_Info *server,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002044 struct smb2_create_rsp *rsp,
2045 unsigned int *epoch, char *lease_key, __u8 *oplock,
2046 struct smb2_file_all_info *buf,
2047 struct create_posix_rsp *posix)
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002048{
2049 char *data_offset;
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002050 struct create_context *cc;
Justin Maggarddeb7def2016-02-09 15:52:08 -08002051 unsigned int next;
2052 unsigned int remaining;
Pavel Shilovskyfd554392013-07-09 19:44:56 +04002053 char *name;
Colin Ian King3ece60e2020-10-20 15:19:36 +01002054 static const char smb3_create_tag_posix[] = {
2055 0x93, 0xAD, 0x25, 0x50, 0x9C,
2056 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
2057 0xDE, 0x96, 0x8B, 0xCD, 0x7C
2058 };
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002059
Steve French89a5bfa2019-07-18 17:22:18 -05002060 *oplock = 0;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002061 data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
Justin Maggarddeb7def2016-02-09 15:52:08 -08002062 remaining = le32_to_cpu(rsp->CreateContextsLength);
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002063 cc = (struct create_context *)data_offset;
Steve French89a5bfa2019-07-18 17:22:18 -05002064
2065 /* Initialize inode number to 0 in case no valid data in qfid context */
2066 if (buf)
2067 buf->IndexNumber = 0;
2068
Justin Maggarddeb7def2016-02-09 15:52:08 -08002069 while (remaining >= sizeof(struct create_context)) {
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002070 name = le16_to_cpu(cc->NameOffset) + (char *)cc;
Justin Maggarddeb7def2016-02-09 15:52:08 -08002071 if (le16_to_cpu(cc->NameLength) == 4 &&
Steve French89a5bfa2019-07-18 17:22:18 -05002072 strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
2073 *oplock = server->ops->parse_lease_buf(cc, epoch,
2074 lease_key);
2075 else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
2076 strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
2077 parse_query_id_ctxt(cc, buf);
Steve Frenchab3459d2020-02-06 17:31:56 -06002078 else if ((le16_to_cpu(cc->NameLength) == 16)) {
Aurelien Aptel69dda302020-03-02 17:53:22 +01002079 if (posix &&
2080 memcmp(name, smb3_create_tag_posix, 16) == 0)
2081 parse_posix_ctxt(cc, buf, posix);
Steve Frenchab3459d2020-02-06 17:31:56 -06002082 }
2083 /* else {
2084 cifs_dbg(FYI, "Context not matched with len %d\n",
2085 le16_to_cpu(cc->NameLength));
2086 cifs_dump_mem("Cctxt name: ", name, 4);
2087 } */
Justin Maggarddeb7def2016-02-09 15:52:08 -08002088
2089 next = le32_to_cpu(cc->Next);
2090 if (!next)
2091 break;
2092 remaining -= next;
2093 cc = (struct create_context *)((char *)cc + next);
2094 }
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002095
Steve French89a5bfa2019-07-18 17:22:18 -05002096 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2097 *oplock = rsp->OplockLevel;
2098
2099 return;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002100}
2101
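/*
 * Append a lease request context to the CREATE iov array.  Note the pattern
 * shared by all the add_*_context() helpers: set the iov entry, point
 * CreateContextsOffset at the first context only, and add each context's
 * size to CreateContextsLength.
 */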
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002102static int
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002103add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
Stefano Brivio729c0c92018-07-05 15:10:02 +02002104 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002105{
2106 struct smb2_create_req *req = iov[0].iov_base;
2107 unsigned int num = *num_iovec;
2108
Stefano Brivio729c0c92018-07-05 15:10:02 +02002109 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002110 if (iov[num].iov_base == NULL)
2111 return -ENOMEM;
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002112 iov[num].iov_len = server->vals->create_lease_size;
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002113 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2114 if (!req->CreateContextsOffset)
2115 req->CreateContextsOffset = cpu_to_le32(
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002116 sizeof(struct smb2_create_req) +
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002117 iov[num - 1].iov_len);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002118 le32_add_cpu(&req->CreateContextsLength,
2119 server->vals->create_lease_size);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002120 *num_iovec = num + 1;
2121 return 0;
2122}
2123
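/*
 * "DH2Q" durable/persistent handle request (v2): a fresh CreateGuid is
 * generated for each open and stashed in the fid so that a later "DH2C"
 * reconnect can reclaim the same handle after a connection drop.
 */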
Steve Frenchb56eae42015-11-03 09:26:27 -06002124static struct create_durable_v2 *
Steve Frenchca567eb2019-03-29 16:31:07 -05002125create_durable_v2_buf(struct cifs_open_parms *oparms)
Steve Frenchb56eae42015-11-03 09:26:27 -06002126{
Steve Frenchca567eb2019-03-29 16:31:07 -05002127 struct cifs_fid *pfid = oparms->fid;
Steve Frenchb56eae42015-11-03 09:26:27 -06002128 struct create_durable_v2 *buf;
2129
2130 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
2131 if (!buf)
2132 return NULL;
2133
2134 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2135 (struct create_durable_v2, dcontext));
2136 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
2137 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2138 (struct create_durable_v2, Name));
2139 buf->ccontext.NameLength = cpu_to_le16(4);
2140
Steve Frenchca567eb2019-03-29 16:31:07 -05002141 /*
2142 * NB: Handle timeout defaults to 0, which allows server to choose
2143 * (most servers default to 120 seconds) and most clients default to 0.
2144 * This can be overridden at mount ("handletimeout=") if the user wants
2145 * a different persistent (or resilient) handle timeout for all opens
 2146	 * on a particular SMB3 mount.
2147 */
2148 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
Steve Frenchb56eae42015-11-03 09:26:27 -06002149 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
Steve Frenchfa70b872016-09-22 00:39:34 -05002150 generate_random_uuid(buf->dcontext.CreateGuid);
Steve Frenchb56eae42015-11-03 09:26:27 -06002151 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2152
2153 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
2154 buf->Name[0] = 'D';
2155 buf->Name[1] = 'H';
2156 buf->Name[2] = '2';
2157 buf->Name[3] = 'Q';
2158 return buf;
2159}
2160
2161static struct create_durable_handle_reconnect_v2 *
2162create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2163{
2164 struct create_durable_handle_reconnect_v2 *buf;
2165
2166 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2167 GFP_KERNEL);
2168 if (!buf)
2169 return NULL;
2170
2171 buf->ccontext.DataOffset =
2172 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2173 dcontext));
2174 buf->ccontext.DataLength =
2175 cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2176 buf->ccontext.NameOffset =
2177 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2178 Name));
2179 buf->ccontext.NameLength = cpu_to_le16(4);
2180
2181 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2182 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2183 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2184 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2185
2186 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2187 buf->Name[0] = 'D';
2188 buf->Name[1] = 'H';
2189 buf->Name[2] = '2';
2190 buf->Name[3] = 'C';
2191 return buf;
2192}
2193
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002194static int
Steve Frenchb56eae42015-11-03 09:26:27 -06002195add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002196 struct cifs_open_parms *oparms)
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002197{
2198 struct smb2_create_req *req = iov[0].iov_base;
2199 unsigned int num = *num_iovec;
2200
Steve Frenchca567eb2019-03-29 16:31:07 -05002201 iov[num].iov_base = create_durable_v2_buf(oparms);
Steve Frenchb56eae42015-11-03 09:26:27 -06002202 if (iov[num].iov_base == NULL)
2203 return -ENOMEM;
2204 iov[num].iov_len = sizeof(struct create_durable_v2);
2205 if (!req->CreateContextsOffset)
2206 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002207 cpu_to_le32(sizeof(struct smb2_create_req) +
Steve Frenchb56eae42015-11-03 09:26:27 -06002208 iov[1].iov_len);
2209 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
Steve Frenchb56eae42015-11-03 09:26:27 -06002210 *num_iovec = num + 1;
2211 return 0;
2212}
2213
2214static int
2215add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2216 struct cifs_open_parms *oparms)
2217{
2218 struct smb2_create_req *req = iov[0].iov_base;
2219 unsigned int num = *num_iovec;
2220
2221 /* indicate that we don't need to relock the file */
2222 oparms->reconnect = false;
2223
2224 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2225 if (iov[num].iov_base == NULL)
2226 return -ENOMEM;
2227 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2228 if (!req->CreateContextsOffset)
2229 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002230 cpu_to_le32(sizeof(struct smb2_create_req) +
Steve Frenchb56eae42015-11-03 09:26:27 -06002231 iov[1].iov_len);
2232 le32_add_cpu(&req->CreateContextsLength,
2233 sizeof(struct create_durable_handle_reconnect_v2));
Steve Frenchb56eae42015-11-03 09:26:27 -06002234 *num_iovec = num + 1;
2235 return 0;
2236}
2237
2238static int
2239add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2240 struct cifs_open_parms *oparms, bool use_persistent)
2241{
2242 struct smb2_create_req *req = iov[0].iov_base;
2243 unsigned int num = *num_iovec;
2244
2245 if (use_persistent) {
2246 if (oparms->reconnect)
2247 return add_durable_reconnect_v2_context(iov, num_iovec,
2248 oparms);
2249 else
2250 return add_durable_v2_context(iov, num_iovec, oparms);
2251 }
2252
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002253 if (oparms->reconnect) {
2254 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2255 /* indicate that we don't need to relock the file */
2256 oparms->reconnect = false;
2257 } else
2258 iov[num].iov_base = create_durable_buf();
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002259 if (iov[num].iov_base == NULL)
2260 return -ENOMEM;
2261 iov[num].iov_len = sizeof(struct create_durable);
2262 if (!req->CreateContextsOffset)
2263 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002264 cpu_to_le32(sizeof(struct smb2_create_req) +
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002265 iov[1].iov_len);
Wei Yongjun31f92e92013-08-26 14:34:46 +08002266 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002267 *num_iovec = num + 1;
2268 return 0;
2269}
2270
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002271/* See MS-SMB2 2.2.13.2.7 */
2272static struct crt_twarp_ctxt *
2273create_twarp_buf(__u64 timewarp)
2274{
2275 struct crt_twarp_ctxt *buf;
2276
2277 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2278 if (!buf)
2279 return NULL;
2280
2281 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2282 (struct crt_twarp_ctxt, Timestamp));
2283 buf->ccontext.DataLength = cpu_to_le32(8);
2284 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2285 (struct crt_twarp_ctxt, Name));
2286 buf->ccontext.NameLength = cpu_to_le16(4);
2287 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2288 buf->Name[0] = 'T';
2289 buf->Name[1] = 'W';
2290 buf->Name[2] = 'r';
2291 buf->Name[3] = 'p';
2292 buf->Timestamp = cpu_to_le64(timewarp);
2293 return buf;
2294}
2295
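/*
 * The timewarp token asks the server to open the file as it existed at the
 * given timestamp (a previous version / snapshot); this is presumably how
 * snapshot-based mounts are expressed on the wire.
 */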
2296/* See MS-SMB2 2.2.13.2.7 */
2297static int
2298add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2299{
2300 struct smb2_create_req *req = iov[0].iov_base;
2301 unsigned int num = *num_iovec;
2302
2303 iov[num].iov_base = create_twarp_buf(timewarp);
2304 if (iov[num].iov_base == NULL)
2305 return -ENOMEM;
2306 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2307 if (!req->CreateContextsOffset)
2308 req->CreateContextsOffset = cpu_to_le32(
2309 sizeof(struct smb2_create_req) +
2310 iov[num - 1].iov_len);
2311 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
2312 *num_iovec = num + 1;
2313 return 0;
2314}
2315
Steve French975221e2020-06-12 09:25:21 -05002316/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
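/* (S-1-5-88-1-<uid> and S-1-5-88-2-<gid> are the special SIDs a server is
 * expected to map back to Unix ids; see the link above for the scheme.)
 */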
2317static void setup_owner_group_sids(char *buf)
2318{
2319 struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2320
2321 /* Populate the user ownership fields S-1-5-88-1 */
2322 sids->owner.Revision = 1;
2323 sids->owner.NumAuth = 3;
2324 sids->owner.Authority[5] = 5;
2325 sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2326 sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2327 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2328
2329 /* Populate the group ownership fields S-1-5-88-2 */
2330 sids->group.Revision = 1;
2331 sids->group.NumAuth = 3;
2332 sids->group.Authority[5] = 5;
2333 sids->group.SubAuthorities[0] = cpu_to_le32(88);
2334 sids->group.SubAuthorities[1] = cpu_to_le32(2);
2335 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
Steve Frencha7a519a2020-06-12 14:49:47 -05002336
2337 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
Steve French975221e2020-06-12 09:25:21 -05002338}
2339
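/*
 * Build the security descriptor create context ("SecD"): a self-relative
 * descriptor whose DACL carries an ACE encoding the POSIX mode in a special
 * SID, an allow ACE for authenticated users, and, when set_owner is true,
 * owner/group SIDs plus an extra owner ACE.
 */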
Steve Frenchfdef6652019-12-06 02:02:38 -06002340/* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
2341static struct crt_sd_ctxt *
Steve French975221e2020-06-12 09:25:21 -05002342create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
Steve Frenchfdef6652019-12-06 02:02:38 -06002343{
2344 struct crt_sd_ctxt *buf;
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002345 __u8 *ptr, *aclptr;
2346 unsigned int acelen, acl_size, ace_count;
Steve French975221e2020-06-12 09:25:21 -05002347 unsigned int owner_offset = 0;
2348 unsigned int group_offset = 0;
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002349 struct smb3_acl acl;
Steve Frenchfdef6652019-12-06 02:02:38 -06002350
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002351 *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
Steve French975221e2020-06-12 09:25:21 -05002352
2353 if (set_owner) {
Steve French975221e2020-06-12 09:25:21 -05002354		/* sizeof(struct owner_group_sids) is already a multiple of 8, so no need to round */
2355 *len += sizeof(struct owner_group_sids);
2356 }
2357
Steve Frenchfdef6652019-12-06 02:02:38 -06002358 buf = kzalloc(*len, GFP_KERNEL);
2359 if (buf == NULL)
2360 return buf;
2361
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002362 ptr = (__u8 *)&buf[1];
Steve French975221e2020-06-12 09:25:21 -05002363 if (set_owner) {
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002364 /* offset fields are from beginning of security descriptor not of create context */
2365 owner_offset = ptr - (__u8 *)&buf->sd;
Steve French975221e2020-06-12 09:25:21 -05002366 buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002367 group_offset = owner_offset + offsetof(struct owner_group_sids, group);
Steve French975221e2020-06-12 09:25:21 -05002368 buf->sd.OffsetGroup = cpu_to_le32(group_offset);
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002369
2370 setup_owner_group_sids(ptr);
2371 ptr += sizeof(struct owner_group_sids);
Steve French975221e2020-06-12 09:25:21 -05002372 } else {
2373 buf->sd.OffsetOwner = 0;
2374 buf->sd.OffsetGroup = 0;
2375 }
2376
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002377 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
Steve French975221e2020-06-12 09:25:21 -05002378 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
Steve Frenchfdef6652019-12-06 02:02:38 -06002379 buf->ccontext.NameLength = cpu_to_le16(4);
2380 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2381 buf->Name[0] = 'S';
2382 buf->Name[1] = 'e';
2383 buf->Name[2] = 'c';
2384 buf->Name[3] = 'D';
2385	buf->sd.Revision = 1;  /* Must be one, see MS-DTYP 2.4.6 */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002386
Steve Frenchfdef6652019-12-06 02:02:38 -06002387 /*
2388	 * ACL is "self relative", i.e. the ACL is stored in a contiguous block
2389	 * of memory, and "DP", i.e. the DACL is present
2390 */
2391 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2392
2393	/* Sbz1 and SACL offset stay zero; owner/group offsets set above if needed */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002394 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2395	/* Skip over the ACL header for now; we will copy it into buf later */
2396 aclptr = ptr;
Steve Frenchb06d8932021-09-23 16:00:31 -05002397 ptr += sizeof(struct smb3_acl);
Steve Frenchfdef6652019-12-06 02:02:38 -06002398
2399 /* create one ACE to hold the mode embedded in reserved special SID */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002400 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
2401 ptr += acelen;
2402 acl_size = acelen + sizeof(struct smb3_acl);
2403 ace_count = 1;
Steve French975221e2020-06-12 09:25:21 -05002404
2405 if (set_owner) {
2406		/* no need to reallocate the buffer to add the two extra ACEs; plenty of space */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002407 acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
2408 ptr += acelen;
2409 acl_size += acelen;
2410 ace_count += 1;
2411 }
Steve French975221e2020-06-12 09:25:21 -05002412
Steve French643fbce2020-01-16 19:55:33 -06002413 /* and one more ACE to allow access for authenticated users */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002414 acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
2415 ptr += acelen;
2416 acl_size += acelen;
2417 ace_count += 1;
Steve French975221e2020-06-12 09:25:21 -05002418
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002419 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2420 acl.AclSize = cpu_to_le16(acl_size);
2421 acl.AceCount = cpu_to_le16(ace_count);
Steve Frenchb06d8932021-09-23 16:00:31 -05002422 memcpy(aclptr, &acl, sizeof(struct smb3_acl));
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002423
2424 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
Shyam Prasad N7d3fc012021-08-04 18:37:22 +00002425 *len = roundup(ptr - (__u8 *)buf, 8);
Steve French975221e2020-06-12 09:25:21 -05002426
Steve Frenchfdef6652019-12-06 02:02:38 -06002427 return buf;
2428}
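/*
 * Rough layout of the buffer built by create_sd_buf() (a sketch; exact
 * sizes depend on which ACEs are included). Offsets inside sd are relative
 * to &buf->sd:
 *
 *	struct crt_sd_ctxt	create context header, "SecD" name, sd header
 *	owner SID, group SID	only when set_owner
 *	struct smb3_acl		header copied in through aclptr at the end
 *	ACEs			mode ACE, owner ACE (set_owner only),
 *				authenticated users ACE
 *
 * DataLength covers everything from sd through the last ACE, and *len is
 * the whole buffer rounded up to a multiple of 8.
 */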
2429
2430static int
Steve French975221e2020-06-12 09:25:21 -05002431add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
Steve Frenchfdef6652019-12-06 02:02:38 -06002432{
2433 struct smb2_create_req *req = iov[0].iov_base;
2434 unsigned int num = *num_iovec;
2435 unsigned int len = 0;
2436
Steve French975221e2020-06-12 09:25:21 -05002437 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
Steve Frenchfdef6652019-12-06 02:02:38 -06002438 if (iov[num].iov_base == NULL)
2439 return -ENOMEM;
2440 iov[num].iov_len = len;
2441 if (!req->CreateContextsOffset)
2442 req->CreateContextsOffset = cpu_to_le32(
2443 sizeof(struct smb2_create_req) +
2444 iov[num - 1].iov_len);
2445 le32_add_cpu(&req->CreateContextsLength, len);
2446 *num_iovec = num + 1;
2447 return 0;
2448}
2449
Steve Frenchff2a09e2019-07-06 14:41:38 -05002450static struct crt_query_id_ctxt *
2451create_query_id_buf(void)
2452{
2453 struct crt_query_id_ctxt *buf;
2454
2455 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2456 if (!buf)
2457 return NULL;
2458
2459 buf->ccontext.DataOffset = cpu_to_le16(0);
2460 buf->ccontext.DataLength = cpu_to_le32(0);
2461 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2462 (struct crt_query_id_ctxt, Name));
2463 buf->ccontext.NameLength = cpu_to_le16(4);
2464 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2465 buf->Name[0] = 'Q';
2466 buf->Name[1] = 'F';
2467 buf->Name[2] = 'i';
2468 buf->Name[3] = 'd';
2469 return buf;
2470}
2471
2472/* See MS-SMB2 2.2.13.2.9 */
2473static int
2474add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2475{
2476 struct smb2_create_req *req = iov[0].iov_base;
2477 unsigned int num = *num_iovec;
2478
2479 iov[num].iov_base = create_query_id_buf();
2480 if (iov[num].iov_base == NULL)
2481 return -ENOMEM;
2482 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2483 if (!req->CreateContextsOffset)
2484 req->CreateContextsOffset = cpu_to_le32(
2485 sizeof(struct smb2_create_req) +
2486 iov[num - 1].iov_len);
2487 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_query_id_ctxt));
2488 *num_iovec = num + 1;
2489 return 0;
2490}
2491
Aurelien Aptelf0712922017-02-22 14:47:17 +01002492static int
2493alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2494 const char *treename, const __le16 *path)
2495{
2496 int treename_len, path_len;
2497 struct nls_table *cp;
2498 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2499
2500 /*
2501 * skip leading "\\"
2502 */
2503 treename_len = strlen(treename);
2504 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2505 return -EINVAL;
2506
2507 treename += 2;
2508 treename_len -= 2;
2509
2510 path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2511
2512 /*
2513 * make room for one path separator between the treename and
2514 * path
2515 */
2516 *out_len = treename_len + 1 + path_len;
2517
2518 /*
2519 * final path needs to be null-terminated UTF16 with a
2520 * size aligned to 8
2521 */
2522
2523 *out_size = roundup((*out_len+1)*2, 8);
2524 *out_path = kzalloc(*out_size, GFP_KERNEL);
2525 if (!*out_path)
2526 return -ENOMEM;
2527
2528 cp = load_nls_default();
2529 cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2530 UniStrcat(*out_path, sep);
2531 UniStrcat(*out_path, path);
2532 unload_nls(cp);
2533
2534 return 0;
2535}
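/*
 * Example of the prefixed path built above (the names are made up): for
 * treename "\\srv\share" and path "dir\sub" the result is the UTF-16
 * string "srv\share\dir\sub", null terminated, in a buffer whose size is
 * rounded up to a multiple of 8 bytes.
 */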
2536
Steve Frenchbea851b2018-06-14 21:56:32 -05002537int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2538 umode_t mode, struct cifs_tcon *tcon,
2539 const char *full_path,
2540 struct cifs_sb_info *cifs_sb)
2541{
2542 struct smb_rqst rqst;
2543 struct smb2_create_req *req;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002544 struct smb2_create_rsp *rsp = NULL;
Steve Frenchbea851b2018-06-14 21:56:32 -05002545 struct cifs_ses *ses = tcon->ses;
2546 struct kvec iov[3]; /* make sure at least one for each open context */
2547 struct kvec rsp_iov = {NULL, 0};
2548 int resp_buftype;
2549 int uni_path_len;
2550 __le16 *copy_path = NULL;
2551 int copy_size;
2552 int rc = 0;
2553 unsigned int n_iov = 2;
2554 __u32 file_attributes = 0;
2555 char *pc_buf = NULL;
2556 int flags = 0;
2557 unsigned int total_len;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002558 __le16 *utf16_path = NULL;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002559 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve Frenchbea851b2018-06-14 21:56:32 -05002560
2561 cifs_dbg(FYI, "mkdir\n");
2562
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002563 /* resource #1: path allocation */
2564 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2565 if (!utf16_path)
2566 return -ENOMEM;
2567
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002568 if (!ses || !server) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002569 rc = -EIO;
2570 goto err_free_path;
2571 }
Steve Frenchbea851b2018-06-14 21:56:32 -05002572
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002573 /* resource #2: request */
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002574 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2575 (void **) &req, &total_len);
Steve Frenchbea851b2018-06-14 21:56:32 -05002576 if (rc)
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002577 goto err_free_path;
2578
Steve Frenchbea851b2018-06-14 21:56:32 -05002579
2580 if (smb3_encryption_required(tcon))
2581 flags |= CIFS_TRANSFORM_REQ;
2582
Steve Frenchbea851b2018-06-14 21:56:32 -05002583 req->ImpersonationLevel = IL_IMPERSONATION;
2584 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2585 /* File attributes ignored on open (used in create though) */
2586 req->FileAttributes = cpu_to_le32(file_attributes);
2587 req->ShareAccess = FILE_SHARE_ALL_LE;
2588 req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2589 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2590
2591 iov[0].iov_base = (char *)req;
2592 /* -1 since last byte is buf[0] which is sent below (path) */
2593 iov[0].iov_len = total_len - 1;
2594
2595 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2596
2597 /* [MS-SMB2] 2.2.13 NameOffset:
2598 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2599 * the SMB2 header, the file name includes a prefix that will
2600 * be processed during DFS name normalization as specified in
2601 * section 3.3.5.9. Otherwise, the file name is relative to
2602 * the share that is identified by the TreeId in the SMB2
2603 * header.
2604 */
2605 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2606 int name_len;
2607
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002608 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
Steve Frenchbea851b2018-06-14 21:56:32 -05002609 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2610 &name_len,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002611 tcon->treeName, utf16_path);
2612 if (rc)
2613 goto err_free_req;
2614
Steve Frenchbea851b2018-06-14 21:56:32 -05002615 req->NameLength = cpu_to_le16(name_len * 2);
2616 uni_path_len = copy_size;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002617 /* free before overwriting resource */
2618 kfree(utf16_path);
2619 utf16_path = copy_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002620 } else {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002621 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
Steve Frenchbea851b2018-06-14 21:56:32 -05002622		/* MUST set path len (NameLength) to 0 when opening root of share */
2623 req->NameLength = cpu_to_le16(uni_path_len - 2);
2624 if (uni_path_len % 8 != 0) {
2625 copy_size = roundup(uni_path_len, 8);
2626 copy_path = kzalloc(copy_size, GFP_KERNEL);
2627 if (!copy_path) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002628 rc = -ENOMEM;
2629 goto err_free_req;
Steve Frenchbea851b2018-06-14 21:56:32 -05002630 }
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002631 memcpy((char *)copy_path, (const char *)utf16_path,
Steve Frenchbea851b2018-06-14 21:56:32 -05002632 uni_path_len);
2633 uni_path_len = copy_size;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002634 /* free before overwriting resource */
2635 kfree(utf16_path);
2636 utf16_path = copy_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002637 }
2638 }
2639
2640 iov[1].iov_len = uni_path_len;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002641 iov[1].iov_base = utf16_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002642 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2643
2644 if (tcon->posix_extensions) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002645 /* resource #3: posix buf */
Steve Frenchbea851b2018-06-14 21:56:32 -05002646 rc = add_posix_context(iov, &n_iov, mode);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002647 if (rc)
2648 goto err_free_req;
Steve Frenchbea851b2018-06-14 21:56:32 -05002649 pc_buf = iov[n_iov-1].iov_base;
2650 }
2651
2652
2653 memset(&rqst, 0, sizeof(struct smb_rqst));
2654 rqst.rq_iov = iov;
2655 rqst.rq_nvec = n_iov;
2656
Steve Frenchd2f15422019-09-22 00:55:46 -05002657 /* no need to inc num_remote_opens because we close it just below */
Steve Frenchefe2e9f2019-02-26 19:08:12 -06002658 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
2659 FILE_WRITE_ATTRIBUTES);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002660 /* resource #4: response buffer */
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002661 rc = cifs_send_recv(xid, ses, server,
2662 &rqst, &resp_buftype, flags, &rsp_iov);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002663 if (rc) {
Steve Frenchbea851b2018-06-14 21:56:32 -05002664 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2665 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002666 CREATE_NOT_FILE,
2667 FILE_WRITE_ATTRIBUTES, rc);
2668 goto err_free_rsp_buf;
2669 }
2670
Steve Frenchca780da2021-11-11 15:35:34 -06002671 /*
2672	 * Although it is unlikely for rsp to be null while rc is not set,
2673	 * adding the check below is slightly safer long term (and quiets a Coverity
2674 * warning)
2675 */
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002676 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
Steve Frenchca780da2021-11-11 15:35:34 -06002677 if (rsp == NULL) {
2678 rc = -EIO;
2679 kfree(pc_buf);
2680 goto err_free_req;
2681 }
2682
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002683 trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
2684 tcon->tid,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002685 ses->Suid, CREATE_NOT_FILE,
2686 FILE_WRITE_ATTRIBUTES);
Steve Frenchbea851b2018-06-14 21:56:32 -05002687
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002688 SMB2_close(xid, tcon, le64_to_cpu(rsp->PersistentFileId),
2689 le64_to_cpu(rsp->VolatileFileId));
Steve Frenchbea851b2018-06-14 21:56:32 -05002690
2691	/* Eventually save off posix specific response info and timestamps */
2692
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002693err_free_rsp_buf:
Steve Frenchbea851b2018-06-14 21:56:32 -05002694 free_rsp_buf(resp_buftype, rsp);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002695 kfree(pc_buf);
2696err_free_req:
2697 cifs_small_buf_release(req);
2698err_free_path:
2699 kfree(utf16_path);
Steve Frenchbea851b2018-06-14 21:56:32 -05002700 return rc;
Steve Frenchbea851b2018-06-14 21:56:32 -05002701}
Steve Frenchbea851b2018-06-14 21:56:32 -05002702
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002703int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002704SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2705 struct smb_rqst *rqst, __u8 *oplock,
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002706 struct cifs_open_parms *oparms, __le16 *path)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002707{
2708 struct smb2_create_req *req;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002709 unsigned int n_iov = 2;
Pavel Shilovskyca819832013-07-05 12:21:26 +04002710 __u32 file_attributes = 0;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002711 int copy_size;
2712 int uni_path_len;
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002713 unsigned int total_len;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002714 struct kvec *iov = rqst->rq_iov;
2715 __le16 *copy_path;
2716 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002717
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002718 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2719 (void **) &req, &total_len);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002720 if (rc)
2721 return rc;
2722
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002723 iov[0].iov_base = (char *)req;
2724 /* -1 since last byte is buf[0] which is sent below (path) */
2725 iov[0].iov_len = total_len - 1;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07002726
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002727 if (oparms->create_options & CREATE_OPTION_READONLY)
Pavel Shilovskyca819832013-07-05 12:21:26 +04002728 file_attributes |= ATTR_READONLY;
Steve Frenchdb8b6312014-09-22 05:13:55 -05002729 if (oparms->create_options & CREATE_OPTION_SPECIAL)
2730 file_attributes |= ATTR_SYSTEM;
Pavel Shilovskyca819832013-07-05 12:21:26 +04002731
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002732 req->ImpersonationLevel = IL_IMPERSONATION;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002733 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002734 /* File attributes ignored on open (used in create though) */
2735 req->FileAttributes = cpu_to_le32(file_attributes);
2736 req->ShareAccess = FILE_SHARE_ALL_LE;
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002737
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002738 req->CreateDisposition = cpu_to_le32(oparms->disposition);
2739 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002740 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
Aurelien Aptelf0712922017-02-22 14:47:17 +01002741
2742 /* [MS-SMB2] 2.2.13 NameOffset:
2743 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2744 * the SMB2 header, the file name includes a prefix that will
2745 * be processed during DFS name normalization as specified in
2746 * section 3.3.5.9. Otherwise, the file name is relative to
2747 * the share that is identified by the TreeId in the SMB2
2748 * header.
2749 */
2750 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2751 int name_len;
2752
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002753 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
Aurelien Aptelf0712922017-02-22 14:47:17 +01002754 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2755 &name_len,
2756 tcon->treeName, path);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002757 if (rc)
Aurelien Aptelf0712922017-02-22 14:47:17 +01002758 return rc;
2759 req->NameLength = cpu_to_le16(name_len * 2);
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002760 uni_path_len = copy_size;
2761 path = copy_path;
Aurelien Aptelf0712922017-02-22 14:47:17 +01002762 } else {
2763 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
2764		/* MUST set path len (NameLength) to 0 when opening root of share */
2765 req->NameLength = cpu_to_le16(uni_path_len - 2);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002766 copy_size = uni_path_len;
2767 if (copy_size % 8 != 0)
2768 copy_size = roundup(copy_size, 8);
2769 copy_path = kzalloc(copy_size, GFP_KERNEL);
2770 if (!copy_path)
2771 return -ENOMEM;
2772 memcpy((char *)copy_path, (const char *)path,
2773 uni_path_len);
2774 uni_path_len = copy_size;
2775 path = copy_path;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002776 }
2777
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002778 iov[1].iov_len = uni_path_len;
2779 iov[1].iov_base = path;
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002780
Steve French3e7a02d2019-09-11 21:46:20 -05002781 if ((!server->oplocks) || (tcon->no_lease))
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002782 *oplock = SMB2_OPLOCK_LEVEL_NONE;
2783
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002784 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002785 *oplock == SMB2_OPLOCK_LEVEL_NONE)
2786 req->RequestedOplockLevel = *oplock;
Steve Frenchf8015682018-08-31 15:12:10 -05002787 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2788 (oparms->create_options & CREATE_NOT_FILE))
2789 req->RequestedOplockLevel = *oplock; /* no srv lease support */
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002790 else {
Stefano Brivio729c0c92018-07-05 15:10:02 +02002791 rc = add_lease_context(server, iov, &n_iov,
2792 oparms->fid->lease_key, oplock);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002793 if (rc)
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002794 return rc;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002795 }
2796
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002797 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
2798 /* need to set Next field of lease context if we request it */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002799 if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002800 struct create_context *ccontext =
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002801 (struct create_context *)iov[n_iov-1].iov_base;
Steve French1c469432013-07-10 12:50:57 -05002802 ccontext->Next =
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002803 cpu_to_le32(server->vals->create_lease_size);
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002804 }
Steve Frenchb56eae42015-11-03 09:26:27 -06002805
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002806 rc = add_durable_context(iov, &n_iov, oparms,
Steve Frenchb56eae42015-11-03 09:26:27 -06002807 tcon->use_persistent);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002808 if (rc)
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002809 return rc;
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002810 }
2811
Steve Frenchce558b02018-05-31 19:16:54 -05002812 if (tcon->posix_extensions) {
2813 if (n_iov > 2) {
2814 struct create_context *ccontext =
2815 (struct create_context *)iov[n_iov-1].iov_base;
2816 ccontext->Next =
2817 cpu_to_le32(iov[n_iov-1].iov_len);
2818 }
2819
2820 rc = add_posix_context(iov, &n_iov, oparms->mode);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002821 if (rc)
Steve Frenchce558b02018-05-31 19:16:54 -05002822 return rc;
Steve Frenchce558b02018-05-31 19:16:54 -05002823 }
Steve Frenchce558b02018-05-31 19:16:54 -05002824
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002825 if (tcon->snapshot_time) {
2826 cifs_dbg(FYI, "adding snapshot context\n");
2827 if (n_iov > 2) {
2828 struct create_context *ccontext =
2829 (struct create_context *)iov[n_iov-1].iov_base;
2830 ccontext->Next =
2831 cpu_to_le32(iov[n_iov-1].iov_len);
2832 }
2833
2834 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
2835 if (rc)
2836 return rc;
2837 }
2838
Steve French975221e2020-06-12 09:25:21 -05002839 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
2840 bool set_mode;
2841 bool set_owner;
2842
2843 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
2844 (oparms->mode != ACL_NO_MODE))
2845 set_mode = true;
2846 else {
2847 set_mode = false;
2848 oparms->mode = ACL_NO_MODE;
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002849 }
2850
Steve French975221e2020-06-12 09:25:21 -05002851 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
2852 set_owner = true;
2853 else
2854 set_owner = false;
2855
2856 if (set_owner | set_mode) {
2857 if (n_iov > 2) {
2858 struct create_context *ccontext =
2859 (struct create_context *)iov[n_iov-1].iov_base;
2860 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2861 }
2862
2863 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
2864 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
2865 if (rc)
2866 return rc;
2867 }
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002868 }
2869
Steve Frenchff2a09e2019-07-06 14:41:38 -05002870 if (n_iov > 2) {
2871 struct create_context *ccontext =
2872 (struct create_context *)iov[n_iov-1].iov_base;
2873 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2874 }
2875 add_query_id_context(iov, &n_iov);
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002876
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002877 rqst->rq_nvec = n_iov;
2878 return 0;
2879}
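/*
 * Sketch of the create context chain assembled by SMB2_open_init() (which
 * contexts actually appear depends on the mount options, oplock state and
 * open parameters): before each new context is appended, the previous
 * context's Next field is set to that previous context's own length, so on
 * the wire the contexts form a linked list, e.g.
 *
 *	lease -> durable -> posix -> timewarp -> security descriptor -> QFid
 *
 * The query id (QFid) context is always appended last and leaves its Next
 * field zero, terminating the chain.
 */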
2880
2881/* rq_iov[0] is the request and is released by cifs_small_buf_release().
2882 * All other vectors are freed by kfree().
2883 */
2884void
2885SMB2_open_free(struct smb_rqst *rqst)
2886{
2887 int i;
2888
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10002889 if (rqst && rqst->rq_iov) {
2890 cifs_small_buf_release(rqst->rq_iov[0].iov_base);
2891 for (i = 1; i < rqst->rq_nvec; i++)
2892 if (rqst->rq_iov[i].iov_base != smb2_padding)
2893 kfree(rqst->rq_iov[i].iov_base);
2894 }
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002895}
2896
2897int
2898SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2899 __u8 *oplock, struct smb2_file_all_info *buf,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002900 struct create_posix_rsp *posix,
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002901 struct kvec *err_iov, int *buftype)
2902{
2903 struct smb_rqst rqst;
2904 struct smb2_create_rsp *rsp = NULL;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002905 struct cifs_tcon *tcon = oparms->tcon;
2906 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002907 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002908 struct kvec iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002909 struct kvec rsp_iov = {NULL, 0};
Garry McNultyef2298a2018-10-03 20:51:21 +01002910 int resp_buftype = CIFS_NO_BUFFER;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002911 int rc = 0;
2912 int flags = 0;
2913
2914 cifs_dbg(FYI, "create/open\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002915 if (!ses || !server)
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002916 return -EIO;
2917
2918 if (smb3_encryption_required(tcon))
2919 flags |= CIFS_TRANSFORM_REQ;
2920
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002921 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002922 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002923 rqst.rq_iov = iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002924 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002925
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002926 rc = SMB2_open_init(tcon, server,
2927 &rqst, oplock, oparms, path);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002928 if (rc)
2929 goto creat_exit;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002930
Steve Frenchefe2e9f2019-02-26 19:08:12 -06002931 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
2932 oparms->create_options, oparms->desired_access);
2933
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002934 rc = cifs_send_recv(xid, ses, server,
2935 &rqst, &resp_buftype, flags,
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002936 &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002937 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002938
2939 if (rc != 0) {
2940 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002941 if (err_iov && rsp) {
2942 *err_iov = rsp_iov;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002943 *buftype = resp_buftype;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002944 resp_buftype = CIFS_NO_BUFFER;
2945 rsp = NULL;
2946 }
Steve French28d59362018-05-30 21:42:34 -05002947 trace_smb3_open_err(xid, tcon->tid, ses->Suid,
2948 oparms->create_options, oparms->desired_access, rc);
Steve French7dcc82c2019-09-11 00:07:36 -05002949 if (rc == -EREMCHG) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002950 pr_warn_once("server share %s deleted\n",
2951 tcon->treeName);
Steve French7dcc82c2019-09-11 00:07:36 -05002952 tcon->need_reconnect = true;
2953 }
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002954 goto creat_exit;
Steve French6b789512021-11-11 16:10:00 -06002955 } else if (rsp == NULL) /* unlikely to happen, but safer to check */
2956 goto creat_exit;
2957 else
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002958 trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
2959 tcon->tid,
Steve French28d59362018-05-30 21:42:34 -05002960 ses->Suid, oparms->create_options,
2961 oparms->desired_access);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002962
Steve Frenchfae80442018-10-19 17:14:32 -05002963 atomic_inc(&tcon->num_remote_opens);
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002964 oparms->fid->persistent_fid = le64_to_cpu(rsp->PersistentFileId);
2965 oparms->fid->volatile_fid = le64_to_cpu(rsp->VolatileFileId);
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002966 oparms->fid->access = oparms->desired_access;
Steve Frenchdfe33f92018-10-30 19:50:31 -05002967#ifdef CONFIG_CIFS_DEBUG2
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002968 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
Steve Frenchdfe33f92018-10-30 19:50:31 -05002969#endif /* CIFS_DEBUG2 */
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07002970
2971 if (buf) {
Kees Cookfbcff332021-06-21 14:44:46 -07002972 buf->CreationTime = rsp->CreationTime;
2973 buf->LastAccessTime = rsp->LastAccessTime;
2974 buf->LastWriteTime = rsp->LastWriteTime;
2975 buf->ChangeTime = rsp->ChangeTime;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07002976 buf->AllocationSize = rsp->AllocationSize;
2977 buf->EndOfFile = rsp->EndofFile;
2978 buf->Attributes = rsp->FileAttributes;
2979 buf->NumberOfLinks = cpu_to_le32(1);
2980 buf->DeletePending = 0;
2981 }
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002982
Steve French89a5bfa2019-07-18 17:22:18 -05002983
2984 smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002985 oparms->fid->lease_key, oplock, buf, posix);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002986creat_exit:
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002987 SMB2_open_free(&rqst);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002988 free_rsp_buf(resp_buftype, rsp);
2989 return rc;
2990}
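/*
 * Minimal usage sketch for the non-compounded path (error handling and the
 * setup of oparms/fid are omitted; utf16_path is a caller supplied UTF-16
 * path), roughly what the in-tree callers do:
 *
 *	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
 *	struct kvec err_iov = {NULL, 0};
 *	int buftype = CIFS_NO_BUFFER;
 *
 *	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
 *		       &err_iov, &buftype);
 *	if (rc == 0)
 *		SMB2_close(xid, oparms.tcon, oparms.fid->persistent_fid,
 *			   oparms.fid->volatile_fid);
 *
 * Compounded callers instead use SMB2_open_init()/SMB2_open_free() around
 * their own send.
 */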
2991
Steve French4a72daf2013-06-25 00:20:49 -05002992int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002993SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2994 struct smb_rqst *rqst,
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002995 u64 persistent_fid, u64 volatile_fid, u32 opcode,
Steve French153322f2019-03-28 22:32:49 -05002996 bool is_fsctl, char *in_data, u32 indatalen,
2997 __u32 max_response_size)
Steve French4a72daf2013-06-25 00:20:49 -05002998{
2999 struct smb2_ioctl_req *req;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003000 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlberg97754682017-11-09 12:14:20 +11003001 unsigned int total_len;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003002 int rc;
Long Li2c87d6a2019-05-15 14:09:05 -07003003 char *in_data_buf;
Steve French4a72daf2013-06-25 00:20:49 -05003004
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003005 rc = smb2_ioctl_req_init(opcode, tcon, server,
3006 (void **) &req, &total_len);
Steve French4a72daf2013-06-25 00:20:49 -05003007 if (rc)
3008 return rc;
3009
Long Li2c87d6a2019-05-15 14:09:05 -07003010 if (indatalen) {
3011 /*
3012 * indatalen is usually small at a couple of bytes max, so
3013 * just allocate through generic pool
3014 */
YueHaibingd81f0972019-06-01 03:31:10 +00003015 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
Long Li2c87d6a2019-05-15 14:09:05 -07003016 if (!in_data_buf) {
3017 cifs_small_buf_release(req);
3018 return -ENOMEM;
3019 }
Long Li2c87d6a2019-05-15 14:09:05 -07003020 }
3021
Steve French4a72daf2013-06-25 00:20:49 -05003022 req->CtlCode = cpu_to_le32(opcode);
3023 req->PersistentFileId = persistent_fid;
3024 req->VolatileFileId = volatile_fid;
3025
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003026 iov[0].iov_base = (char *)req;
3027 /*
3028 * If no input data, the size of ioctl struct in
3029 * protocol spec still includes a 1 byte data buffer,
3030 * but if input data passed to ioctl, we do not
3031 * want to double count this, so we do not send
3032 * the dummy one byte of data in iovec[0] if sending
3033 * input data (in iovec[1]).
3034 */
Steve French4a72daf2013-06-25 00:20:49 -05003035 if (indatalen) {
3036 req->InputCount = cpu_to_le32(indatalen);
3037 /* do not set InputOffset if no input data */
3038 req->InputOffset =
Ronnie Sahlberg97754682017-11-09 12:14:20 +11003039 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003040 rqst->rq_nvec = 2;
3041 iov[0].iov_len = total_len - 1;
Long Li2c87d6a2019-05-15 14:09:05 -07003042 iov[1].iov_base = in_data_buf;
Steve French4a72daf2013-06-25 00:20:49 -05003043 iov[1].iov_len = indatalen;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003044 } else {
3045 rqst->rq_nvec = 1;
3046 iov[0].iov_len = total_len;
3047 }
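	/*
	 * Resulting iovec layout, a sketch of the two cases handled above:
	 *
	 *	with input data:	iov[0] = request header, total_len - 1 bytes
	 *				iov[1] = copy of in_data, indatalen bytes
	 *	without input data:	iov[0] = request including the one byte
	 *				Buffer pad, total_len bytes
	 */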
Steve French4a72daf2013-06-25 00:20:49 -05003048
3049 req->OutputOffset = 0;
3050 req->OutputCount = 0; /* MBZ */
3051
3052 /*
Steve French153322f2019-03-28 22:32:49 -05003053	 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
3054	 * We could increase the default MaxOutputResponse, but that could require
3055	 * more credits. Windows typically sets this smaller, but for some
Steve French4a72daf2013-06-25 00:20:49 -05003056	 * ioctls it may be useful to allow the server to send more. No point
3057	 * limiting what the server can send as long as it fits in one credit.
Steve French153322f2019-03-28 22:32:49 -05003058	 * We cannot handle more than CIFS_MAX_BUF_SIZE yet but may want
3059	 * to increase this limit in the future.
3060	 * Note that for snapshot queries, servers like Azure expect that
3061 * the first query be minimal size (and just used to get the number/size
3062 * of previous versions) so response size must be specified as EXACTLY
3063 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
3064 * of eight bytes. Currently that is the only case where we set max
3065 * response size smaller.
Steve French4a72daf2013-06-25 00:20:49 -05003066 */
Steve French153322f2019-03-28 22:32:49 -05003067 req->MaxOutputResponse = cpu_to_le32(max_response_size);
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003068 req->hdr.CreditCharge =
Namjae Jeonebf57442020-06-11 11:21:19 +09003069 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
3070 SMB2_MAX_BUFFER_SIZE));
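	/*
	 * Worked example of the credit charge above, assuming the usual 64K
	 * SMB2_MAX_BUFFER_SIZE: with the default 16K max_response_size and a
	 * few bytes of input, DIV_ROUND_UP(16384, 65536) charges one credit,
	 * while a caller asking for a 1M response would be charged 16 credits.
	 */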
Steve French4a72daf2013-06-25 00:20:49 -05003071 if (is_fsctl)
3072 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
3073 else
3074 req->Flags = 0;
3075
Steve French4587eee2017-10-25 15:58:31 -05003076 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
3077 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003078 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Steve French4a72daf2013-06-25 00:20:49 -05003079
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003080 return 0;
3081}
3082
3083void
3084SMB2_ioctl_free(struct smb_rqst *rqst)
3085{
Murphy Zhou6457c202019-05-23 12:12:43 +08003086 int i;
Long Li2c87d6a2019-05-15 14:09:05 -07003087 if (rqst && rqst->rq_iov) {
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003088 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Murphy Zhou6457c202019-05-23 12:12:43 +08003089 for (i = 1; i < rqst->rq_nvec; i++)
3090 if (rqst->rq_iov[i].iov_base != smb2_padding)
3091 kfree(rqst->rq_iov[i].iov_base);
Long Li2c87d6a2019-05-15 14:09:05 -07003092 }
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003093}
3094
Steve French153322f2019-03-28 22:32:49 -05003095
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003096/*
3097 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
3098 */
3099int
3100SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3101 u64 volatile_fid, u32 opcode, bool is_fsctl,
Steve French153322f2019-03-28 22:32:49 -05003102 char *in_data, u32 indatalen, u32 max_out_data_len,
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003103 char **out_data, u32 *plen /* returned data len */)
3104{
3105 struct smb_rqst rqst;
3106 struct smb2_ioctl_rsp *rsp = NULL;
3107 struct cifs_ses *ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003108 struct TCP_Server_Info *server;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003109 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003110 struct kvec rsp_iov = {NULL, 0};
3111 int resp_buftype = CIFS_NO_BUFFER;
3112 int rc = 0;
3113 int flags = 0;
3114
3115 cifs_dbg(FYI, "SMB2 IOCTL\n");
3116
3117 if (out_data != NULL)
3118 *out_data = NULL;
3119
3120 /* zero out returned data len, in case of error */
3121 if (plen)
3122 *plen = 0;
3123
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003124 if (!tcon)
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003125 return -EIO;
3126
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003127 ses = tcon->ses;
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003128 if (!ses)
3129 return -EIO;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003130
3131 server = cifs_pick_channel(ses);
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003132 if (!server)
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003133 return -EIO;
3134
3135 if (smb3_encryption_required(tcon))
3136 flags |= CIFS_TRANSFORM_REQ;
3137
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003138 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003139 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003140 rqst.rq_iov = iov;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003141 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003142
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003143 rc = SMB2_ioctl_init(tcon, server,
3144 &rqst, persistent_fid, volatile_fid, opcode,
Steve French153322f2019-03-28 22:32:49 -05003145 is_fsctl, in_data, indatalen, max_out_data_len);
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003146 if (rc)
3147 goto ioctl_exit;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003148
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003149 rc = cifs_send_recv(xid, ses, server,
3150 &rqst, &resp_buftype, flags,
Ronnie Sahlberg97754682017-11-09 12:14:20 +11003151 &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003152 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
Steve French4a72daf2013-06-25 00:20:49 -05003153
Steve Frencheccb4422018-05-17 21:16:55 -05003154 if (rc != 0)
3155 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3156 ses->Suid, 0, opcode, rc);
3157
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003158 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
Steve French8e353102015-03-26 19:47:02 -05003159 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
Steve French4a72daf2013-06-25 00:20:49 -05003160 goto ioctl_exit;
Steve French9bf0c9c2013-11-16 18:05:28 -06003161 } else if (rc == -EINVAL) {
3162 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3163 (opcode != FSCTL_SRV_COPYCHUNK)) {
Steve French8e353102015-03-26 19:47:02 -05003164 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
Steve French9bf0c9c2013-11-16 18:05:28 -06003165 goto ioctl_exit;
3166 }
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003167 } else if (rc == -E2BIG) {
3168 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3169 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3170 goto ioctl_exit;
3171 }
Steve French4a72daf2013-06-25 00:20:49 -05003172 }
3173
3174 /* check if caller wants to look at return data or just return rc */
3175 if ((plen == NULL) || (out_data == NULL))
3176 goto ioctl_exit;
3177
Steve French4d9beec2021-11-11 14:39:23 -06003178 /*
3179	 * Although it is unlikely for rsp to be null while rc is not set,
3180	 * adding the check below is slightly safer long term (and quiets a Coverity
3181 * warning)
3182 */
3183 if (rsp == NULL) {
3184 rc = -EIO;
3185 goto ioctl_exit;
3186 }
3187
Steve French4a72daf2013-06-25 00:20:49 -05003188 *plen = le32_to_cpu(rsp->OutputCount);
3189
3190 /* We check for obvious errors in the output buffer length and offset */
3191 if (*plen == 0)
3192 goto ioctl_exit; /* server returned no data */
Dan Carpenter2d204ee2018-09-10 14:12:07 +03003193 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003194 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
Steve French4a72daf2013-06-25 00:20:49 -05003195 *plen = 0;
3196 rc = -EIO;
3197 goto ioctl_exit;
3198 }
3199
Dan Carpenter2d204ee2018-09-10 14:12:07 +03003200 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003201 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
Steve French4a72daf2013-06-25 00:20:49 -05003202 le32_to_cpu(rsp->OutputOffset));
3203 *plen = 0;
3204 rc = -EIO;
3205 goto ioctl_exit;
3206 }
3207
YueHaibingd034fee2018-09-10 01:33:06 +00003208 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3209 *plen, GFP_KERNEL);
Steve French4a72daf2013-06-25 00:20:49 -05003210 if (*out_data == NULL) {
3211 rc = -ENOMEM;
3212 goto ioctl_exit;
3213 }
3214
Steve French4a72daf2013-06-25 00:20:49 -05003215ioctl_exit:
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003216 SMB2_ioctl_free(&rqst);
Steve French4a72daf2013-06-25 00:20:49 -05003217 free_rsp_buf(resp_buftype, rsp);
3218 return rc;
3219}
3220
Steve French64a5cfa2013-10-14 15:31:32 -05003221/*
3222 * Individual callers to ioctl worker function follow
3223 */
3224
3225int
3226SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3227 u64 persistent_fid, u64 volatile_fid)
3228{
3229 int rc;
Steve French64a5cfa2013-10-14 15:31:32 -05003230 struct compress_ioctl fsctl_input;
3231 char *ret_data = NULL;
3232
3233 fsctl_input.CompressionState =
Fabian Frederickbc09d142014-12-10 15:41:15 -08003234 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
Steve French64a5cfa2013-10-14 15:31:32 -05003235
3236 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3237 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
3238 (char *)&fsctl_input /* data input */,
Steve French153322f2019-03-28 22:32:49 -05003239 2 /* in data len */, CIFSMaxBufSize /* max out data */,
3240 &ret_data /* out data */, NULL);
Steve French64a5cfa2013-10-14 15:31:32 -05003241
3242 cifs_dbg(FYI, "set compression rc %d\n", rc);
Steve French64a5cfa2013-10-14 15:31:32 -05003243
3244 return rc;
3245}
3246
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003247int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003248SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3249 struct smb_rqst *rqst,
Steve French43f8a6a2019-12-02 21:46:54 -06003250 u64 persistent_fid, u64 volatile_fid, bool query_attrs)
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003251{
3252 struct smb2_close_req *req;
3253 struct kvec *iov = rqst->rq_iov;
3254 unsigned int total_len;
3255 int rc;
3256
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003257 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3258 (void **) &req, &total_len);
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003259 if (rc)
3260 return rc;
3261
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003262 req->PersistentFileId = cpu_to_le64(persistent_fid);
3263 req->VolatileFileId = cpu_to_le64(volatile_fid);
Steve French43f8a6a2019-12-02 21:46:54 -06003264 if (query_attrs)
3265 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3266 else
3267 req->Flags = 0;
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003268 iov[0].iov_base = (char *)req;
3269 iov[0].iov_len = total_len;
3270
3271 return 0;
3272}
3273
3274void
3275SMB2_close_free(struct smb_rqst *rqst)
3276{
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10003277 if (rqst && rqst->rq_iov)
3278 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003279}
3280
3281int
Steve French43f8a6a2019-12-02 21:46:54 -06003282__SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3283 u64 persistent_fid, u64 volatile_fid,
3284 struct smb2_file_network_open_info *pbuf)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003285{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003286 struct smb_rqst rqst;
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003287 struct smb2_close_rsp *rsp = NULL;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003288 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003289 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003290 struct kvec iov[1];
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003291 struct kvec rsp_iov;
Garry McNultyef2298a2018-10-03 20:51:21 +01003292 int resp_buftype = CIFS_NO_BUFFER;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003293 int rc = 0;
Steve French9e8fae22019-12-02 17:55:41 -06003294 int flags = 0;
Steve French43f8a6a2019-12-02 21:46:54 -06003295 bool query_attrs = false;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003296
Joe Perchesf96637b2013-05-04 22:12:25 -05003297 cifs_dbg(FYI, "Close\n");
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003298
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003299 if (!ses || !server)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003300 return -EIO;
3301
Steve French5a77e752018-05-09 17:43:08 -05003302 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07003303 flags |= CIFS_TRANSFORM_REQ;
3304
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003305 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003306 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003307 rqst.rq_iov = iov;
3308 rqst.rq_nvec = 1;
3309
Steve French43f8a6a2019-12-02 21:46:54 -06003310	/* check if we need to ask the server to return timestamps in the close response */
3311 if (pbuf)
3312 query_attrs = true;
3313
Steve Frenchf90f9792019-09-03 18:35:42 -05003314 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003315 rc = SMB2_close_init(tcon, server,
3316 &rqst, persistent_fid, volatile_fid,
Steve French43f8a6a2019-12-02 21:46:54 -06003317 query_attrs);
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003318 if (rc)
3319 goto close_exit;
3320
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003321 rc = cifs_send_recv(xid, ses, server,
3322 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003323 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003324
3325 if (rc != 0) {
Namjae Jeond4a029d2014-08-20 19:39:59 +09003326 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003327 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3328 rc);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003329 goto close_exit;
Steve French43f8a6a2019-12-02 21:46:54 -06003330 } else {
Steve Frenchf90f9792019-09-03 18:35:42 -05003331 trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3332 ses->Suid);
Steve French43f8a6a2019-12-02 21:46:54 -06003333 /*
3334	 * Note that we have to subtract 4 since struct network_open_info
3335	 * has a final 4 byte pad that the close response does not have
3336 */
3337 if (pbuf)
3338 memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
3339 }
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003340
Steve Frenchfae80442018-10-19 17:14:32 -05003341 atomic_dec(&tcon->num_remote_opens);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003342close_exit:
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003343 SMB2_close_free(&rqst);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003344 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003345
3346 /* retry close in a worker thread if this one is interrupted */
Paulo Alcantara2659d3b2021-01-13 14:16:16 -03003347 if (is_interrupt_error(rc)) {
Steve French9e8fae22019-12-02 17:55:41 -06003348 int tmp_rc;
3349
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003350 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3351 volatile_fid);
3352 if (tmp_rc)
3353 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3354 persistent_fid, tmp_rc);
3355 }
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003356 return rc;
Ronnie Sahlberg97ca1762018-04-26 08:50:49 -06003357}
3358
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003359int
Steve French43f8a6a2019-12-02 21:46:54 -06003360SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3361 u64 persistent_fid, u64 volatile_fid)
3362{
3363 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3364}
3365
3366int
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003367smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3368 struct kvec *iov, unsigned int min_buf_size)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003369{
Ronnie Sahlbergc1596ff2018-04-09 18:06:30 +10003370 unsigned int smb_len = iov->iov_len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003371 char *end_of_smb = smb_len + (char *)iov->iov_base;
3372 char *begin_of_buf = offset + (char *)iov->iov_base;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003373 char *end_of_buf = begin_of_buf + buffer_length;
3374
3375
3376 if (buffer_length < min_buf_size) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003377 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3378 buffer_length, min_buf_size);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003379 return -EINVAL;
3380 }
3381
3382 /* check if beyond RFC1001 maximum length */
3383 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003384 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3385 buffer_length, smb_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003386 return -EINVAL;
3387 }
3388
3389 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07003390 cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003391 return -EINVAL;
3392 }
3393
3394 return 0;
3395}
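/*
 * Example of the bounds checks above (the numbers are only illustrative):
 * for a response where iov_len is 200 bytes, a server-claimed buffer at
 * offset 72 with length 150 is rejected, since 72 + 150 runs past the end
 * of the received SMB, while length 100 at the same offset would pass,
 * provided it also meets the caller's minimum size.
 */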
3396
3397/*
3398 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3399 * Caller must free buffer.
3400 */
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10003401int
3402smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3403 struct kvec *iov, unsigned int minbufsize,
3404 char *data)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003405{
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003406 char *begin_of_buf = offset + (char *)iov->iov_base;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003407 int rc;
3408
3409 if (!data)
3410 return -EINVAL;
3411
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003412 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003413 if (rc)
3414 return rc;
3415
3416 memcpy(data, begin_of_buf, buffer_length);
3417
3418 return 0;
3419}
3420
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003421int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003422SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3423 struct smb_rqst *rqst,
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003424 u64 persistent_fid, u64 volatile_fid,
3425 u8 info_class, u8 info_type, u32 additional_info,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003426 size_t output_len, size_t input_len, void *input)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003427{
3428 struct smb2_query_info_req *req;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003429 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11003430 unsigned int total_len;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003431 int rc;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003432
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003433 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3434 (void **) &req, &total_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003435 if (rc)
3436 return rc;
3437
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003438 req->InfoType = info_type;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003439 req->FileInfoClass = info_class;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003440 req->PersistentFileId = persistent_fid;
3441 req->VolatileFileId = volatile_fid;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003442 req->AdditionalInformation = cpu_to_le32(additional_info);
Aurelien Aptel48923d22017-10-17 14:47:17 +02003443
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003444 req->OutputBufferLength = cpu_to_le32(output_len);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003445 if (input_len) {
3446 req->InputBufferLength = cpu_to_le32(input_len);
3447		/* total_len for smb query request is never close to the le16 max */
3448 req->InputBufferOffset = cpu_to_le16(total_len - 1);
3449 memcpy(req->Buffer, input, input_len);
3450 }
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003451
3452 iov[0].iov_base = (char *)req;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11003453 /* 1 for Buffer */
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003454 iov[0].iov_len = total_len - 1 + input_len;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003455 return 0;
3456}
3457
3458void
3459SMB2_query_info_free(struct smb_rqst *rqst)
3460{
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10003461 if (rqst && rqst->rq_iov)
3462 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003463}
3464
3465static int
3466query_info(const unsigned int xid, struct cifs_tcon *tcon,
3467 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3468 u32 additional_info, size_t output_len, size_t min_len, void **data,
3469 u32 *dlen)
3470{
3471 struct smb_rqst rqst;
3472 struct smb2_query_info_rsp *rsp = NULL;
3473 struct kvec iov[1];
3474 struct kvec rsp_iov;
3475 int rc = 0;
Garry McNultyef2298a2018-10-03 20:51:21 +01003476 int resp_buftype = CIFS_NO_BUFFER;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003477 struct cifs_ses *ses = tcon->ses;
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003478 struct TCP_Server_Info *server;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003479 int flags = 0;
Colin Ian King73aaf922019-01-16 16:28:59 +00003480 bool allocated = false;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003481
3482 cifs_dbg(FYI, "Query Info\n");
3483
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003484 if (!ses)
3485 return -EIO;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003486 server = cifs_pick_channel(ses);
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003487 if (!server)
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003488 return -EIO;
3489
3490 if (smb3_encryption_required(tcon))
3491 flags |= CIFS_TRANSFORM_REQ;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003492
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003493 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003494 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003495 rqst.rq_iov = iov;
3496 rqst.rq_nvec = 1;
3497
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003498 rc = SMB2_query_info_init(tcon, server,
3499 &rqst, persistent_fid, volatile_fid,
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003500 info_class, info_type, additional_info,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003501 output_len, 0, NULL);
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003502 if (rc)
3503 goto qinf_exit;
3504
Steve Frenchd42043a2019-02-26 21:58:30 -06003505 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3506 ses->Suid, info_class, (__u32)info_type);
3507
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003508 rc = cifs_send_recv(xid, ses, server,
3509 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003510 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovskye5d04882012-09-19 16:03:26 +04003511
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003512 if (rc) {
3513 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003514 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3515 ses->Suid, info_class, (__u32)info_type, rc);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003516 goto qinf_exit;
3517 }
3518
Steve Frenchd42043a2019-02-26 21:58:30 -06003519 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3520 ses->Suid, info_class, (__u32)info_type);
3521
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003522 if (dlen) {
3523 *dlen = le32_to_cpu(rsp->OutputBufferLength);
3524 if (!*data) {
3525 *data = kmalloc(*dlen, GFP_KERNEL);
3526 if (!*data) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003527 cifs_tcon_dbg(VFS,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003528 "Error %d allocating memory for acl\n",
3529 rc);
3530 *dlen = 0;
Colin Ian King73aaf922019-01-16 16:28:59 +00003531 rc = -ENOMEM;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003532 goto qinf_exit;
3533 }
Colin Ian King73aaf922019-01-16 16:28:59 +00003534 allocated = true;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003535 }
3536 }
3537
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10003538 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3539 le32_to_cpu(rsp->OutputBufferLength),
3540 &rsp_iov, min_len, *data);
Colin Ian King73aaf922019-01-16 16:28:59 +00003541 if (rc && allocated) {
3542 kfree(*data);
3543 *data = NULL;
3544 *dlen = 0;
3545 }
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003546
3547qinf_exit:
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003548 SMB2_query_info_free(&rqst);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003549 free_rsp_buf(resp_buftype, rsp);
3550 return rc;
3551}
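/*
 * The exported wrappers that follow show the intended use of query_info():
 * each picks an info class/type and a minimum buffer size, e.g.
 * FILE_ALL_INFORMATION for SMB2_query_info(), SMB2_O_INFO_SECURITY for
 * SMB2_query_acl() and FILE_INTERNAL_INFORMATION for SMB2_get_srv_num().
 */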
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003552
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003553int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3554 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003555{
3556 return query_info(xid, tcon, persistent_fid, volatile_fid,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003557 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04003558 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003559 sizeof(struct smb2_file_all_info), (void **)&data,
3560 NULL);
3561}
3562
Steve Frenche0ae8a92021-06-19 16:19:09 -05003563#if 0
3564/* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003565int
Steve Frenchb1bc1872020-06-11 20:23:38 -05003566SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3567 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
3568{
3569	size_t output_len = sizeof(struct smb311_posix_qinfo) +
3570 (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
3571 *plen = 0;
3572
3573 return query_info(xid, tcon, persistent_fid, volatile_fid,
3574 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
3575 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
Steve Frenche0ae8a92021-06-19 16:19:09 -05003576 /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
Steve Frenchb1bc1872020-06-11 20:23:38 -05003577}
Steve Frenche0ae8a92021-06-19 16:19:09 -05003578#endif
Steve Frenchb1bc1872020-06-11 20:23:38 -05003579
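/*
 * Query the security descriptor (owner, group and DACL, plus whatever is
 * requested via extra_info, such as the SACL) for an open handle.  If *data
 * is NULL, query_info() allocates the return buffer and the caller is
 * responsible for freeing it.
 */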
3580int
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003581SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003582 u64 persistent_fid, u64 volatile_fid,
Boris Protopopov9541b812020-12-17 20:58:08 +00003583 void **data, u32 *plen, u32 extra_info)
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003584{
Boris Protopopov9541b812020-12-17 20:58:08 +00003585 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
3586 extra_info;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003587 *plen = 0;
3588
3589 return query_info(xid, tcon, persistent_fid, volatile_fid,
3590 0, SMB2_O_INFO_SECURITY, additional_info,
Shirish Pargaonkaree25c6d2018-06-04 06:46:22 -05003591 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003592}
3593
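/*
 * Retrieve the server-assigned unique file id (FILE_INTERNAL_INFORMATION)
 * for an open handle.
 */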
3594int
3595SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
3596 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
3597{
3598 return query_info(xid, tcon, persistent_fid, volatile_fid,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003599 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003600 sizeof(struct smb2_file_internal_info),
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003601 sizeof(struct smb2_file_internal_info),
3602 (void **)&uniqueid, NULL);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003603}
3604
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003605/*
Steve Frenchc3498182019-09-15 22:38:52 -05003606 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
3607 * See MS-SMB2 2.2.35 and 2.2.36
3608 */
3609
zhengbin388962e2019-09-23 15:06:18 +08003610static int
Steve Frenchc3498182019-09-15 22:38:52 -05003611SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003612 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3613 u64 persistent_fid, u64 volatile_fid,
3614 u32 completion_filter, bool watch_tree)
Steve Frenchc3498182019-09-15 22:38:52 -05003615{
3616 struct smb2_change_notify_req *req;
3617 struct kvec *iov = rqst->rq_iov;
3618 unsigned int total_len;
3619 int rc;
3620
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003621 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
3622 (void **) &req, &total_len);
Steve Frenchc3498182019-09-15 22:38:52 -05003623 if (rc)
3624 return rc;
3625
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003626 req->PersistentFileId = cpu_to_le64(persistent_fid);
3627 req->VolatileFileId = cpu_to_le64(volatile_fid);
Steve Frenchd26c2dd2020-02-06 06:00:14 -06003628 /* See note 354 of MS-SMB2, 64K max */
Steve French52870d52019-10-01 21:25:46 -05003629 req->OutputBufferLength =
3630 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
Steve Frenchc3498182019-09-15 22:38:52 -05003631 req->CompletionFilter = cpu_to_le32(completion_filter);
3632 if (watch_tree)
3633 req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
3634 else
3635 req->Flags = 0;
3636
3637 iov[0].iov_base = (char *)req;
3638 iov[0].iov_len = total_len;
3639
3640 return 0;
3641}
3642
3643int
3644SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
3645 u64 persistent_fid, u64 volatile_fid, bool watch_tree,
3646 u32 completion_filter)
3647{
3648 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003649 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve Frenchc3498182019-09-15 22:38:52 -05003650 struct smb_rqst rqst;
3651 struct kvec iov[1];
3652 struct kvec rsp_iov = {NULL, 0};
3653 int resp_buftype = CIFS_NO_BUFFER;
3654 int flags = 0;
3655 int rc = 0;
3656
3657 cifs_dbg(FYI, "change notify\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003658 if (!ses || !server)
Steve Frenchc3498182019-09-15 22:38:52 -05003659 return -EIO;
3660
3661 if (smb3_encryption_required(tcon))
3662 flags |= CIFS_TRANSFORM_REQ;
3663
3664 memset(&rqst, 0, sizeof(struct smb_rqst));
3665 memset(&iov, 0, sizeof(iov));
3666 rqst.rq_iov = iov;
3667 rqst.rq_nvec = 1;
3668
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003669 rc = SMB2_notify_init(xid, &rqst, tcon, server,
3670 persistent_fid, volatile_fid,
Steve Frenchc3498182019-09-15 22:38:52 -05003671 completion_filter, watch_tree);
3672 if (rc)
3673 goto cnotify_exit;
3674
3675 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
3676 (u8)watch_tree, completion_filter);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003677 rc = cifs_send_recv(xid, ses, server,
3678 &rqst, &resp_buftype, flags, &rsp_iov);
Steve Frenchc3498182019-09-15 22:38:52 -05003679
3680 if (rc != 0) {
3681 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
3682 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
3683 (u8)watch_tree, completion_filter, rc);
3684 } else
3685 trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
3686 ses->Suid, (u8)watch_tree, completion_filter);
3687
3688 cnotify_exit:
3689 if (rqst.rq_iov)
3690 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
3691 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3692 return rc;
3693}
3694
3695
3696
3697/*
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003698 * This is a no-op for now. We're not really interested in the reply, but
3699 * rather in the fact that the server sent one and that server->lstrp
3700 * gets updated.
3701 *
3702 * FIXME: maybe we should consider checking that the reply matches the request?
3703 */
3704static void
3705smb2_echo_callback(struct mid_q_entry *mid)
3706{
3707 struct TCP_Server_Info *server = mid->callback_data;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003708 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003709 struct cifs_credits credits = { .value = 0, .instance = 0 };
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003710
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08003711 if (mid->mid_state == MID_RESPONSE_RECEIVED
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003712 || mid->mid_state == MID_RESPONSE_MALFORMED) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003713 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003714 credits.instance = server->reconnect_instance;
3715 }
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003716
3717 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003718 add_credits(server, &credits, CIFS_ECHO_OP);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003719}
3720
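/*
 * Delayed work that reconnects tree connections after the transport has been
 * re-established: it collects every tcon (and the IPC tcon) on this server
 * that needs reconnecting, issues an SMB2_INTERNAL_CMD reconnect for each,
 * reopens persistent handles on success, and reschedules itself if any
 * reconnect failed.
 */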
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003721void smb2_reconnect_server(struct work_struct *work)
3722{
3723 struct TCP_Server_Info *server = container_of(work,
3724 struct TCP_Server_Info, reconnect.work);
3725 struct cifs_ses *ses;
3726 struct cifs_tcon *tcon, *tcon2;
3727 struct list_head tmp_list;
3728 int tcon_exist = false;
Germano Percossi18ea4312017-04-07 12:29:36 +01003729 int rc;
3730 int resched = false;
3731
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003732
3733	/* Prevent simultaneous reconnects that can corrupt tcon->rlist */
3734 mutex_lock(&server->reconnect_mutex);
3735
3736 INIT_LIST_HEAD(&tmp_list);
3737 cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
3738
3739 spin_lock(&cifs_tcp_ses_lock);
3740 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3741 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
Pavel Shilovsky96a988f2016-11-29 11:31:23 -08003742 if (tcon->need_reconnect || tcon->need_reopen_files) {
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003743 tcon->tc_count++;
3744 list_add_tail(&tcon->rlist, &tmp_list);
3745 tcon_exist = true;
3746 }
3747 }
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003748 /*
3749 * IPC has the same lifetime as its session and uses its
3750 * refcount.
3751 */
Aurelien Aptelb327a712018-01-24 13:46:10 +01003752 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
3753 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
3754 tcon_exist = true;
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003755 ses->ses_count++;
Aurelien Aptelb327a712018-01-24 13:46:10 +01003756 }
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003757 }
3758 /*
3759 * Get the reference to server struct to be sure that the last call of
3760 * cifs_put_tcon() in the loop below won't release the server pointer.
3761 */
3762 if (tcon_exist)
3763 server->srv_count++;
3764
3765 spin_unlock(&cifs_tcp_ses_lock);
3766
3767 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003768 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
Germano Percossi18ea4312017-04-07 12:29:36 +01003769 if (!rc)
Pavel Shilovsky96a988f2016-11-29 11:31:23 -08003770 cifs_reopen_persistent_handles(tcon);
Germano Percossi18ea4312017-04-07 12:29:36 +01003771 else
3772 resched = true;
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003773 list_del_init(&tcon->rlist);
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003774 if (tcon->ipc)
3775 cifs_put_smb_ses(tcon->ses);
3776 else
3777 cifs_put_tcon(tcon);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003778 }
3779
3780 cifs_dbg(FYI, "Reconnecting tcons finished\n");
Germano Percossi18ea4312017-04-07 12:29:36 +01003781 if (resched)
3782 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003783 mutex_unlock(&server->reconnect_mutex);
3784
3785 /* now we can safely release srv struct */
3786 if (tcon_exist)
3787 cifs_put_tcp_session(server, 1);
3788}
3789
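/*
 * Send an asynchronous SMB2 ECHO to keep the session alive.  If the socket
 * is still waiting on negotiate, kick the reconnect worker instead of
 * echoing; the response (and its credits) is handled in smb2_echo_callback().
 */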
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003790int
3791SMB2_echo(struct TCP_Server_Info *server)
3792{
3793 struct smb2_echo_req *req;
3794 int rc = 0;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003795 struct kvec iov[1];
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003796 struct smb_rqst rqst = { .rq_iov = iov,
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003797 .rq_nvec = 1 };
Ronnie Sahlberg7f7ae752017-11-09 12:14:21 +11003798 unsigned int total_len;
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003799
Joe Perchesf96637b2013-05-04 22:12:25 -05003800 cifs_dbg(FYI, "In echo request\n");
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003801
Steve French4fcd1812016-06-22 20:12:05 -05003802 if (server->tcpStatus == CifsNeedNegotiate) {
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003803 /* No need to send echo on newly established connections */
Stefan Metzmacherb08484d2020-02-24 14:14:59 +01003804 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003805 return rc;
Steve French4fcd1812016-06-22 20:12:05 -05003806 }
3807
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003808 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
3809 (void **)&req, &total_len);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003810 if (rc)
3811 return rc;
3812
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003813 req->hdr.CreditRequest = cpu_to_le16(1);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003814
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003815 iov[0].iov_len = total_len;
3816 iov[0].iov_base = (char *)req;
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003817
Pavel Shilovsky9b7c18a2016-11-16 14:06:17 -08003818 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08003819 server, CIFS_ECHO_OP, NULL);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003820 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003821 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003822
3823 cifs_small_buf_release(req);
3824 return rc;
3825}
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003826
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003827void
3828SMB2_flush_free(struct smb_rqst *rqst)
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003829{
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003830 if (rqst && rqst->rq_iov)
3831 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3832}
3833
3834int
3835SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003836 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3837 u64 persistent_fid, u64 volatile_fid)
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003838{
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003839 struct smb2_flush_req *req;
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003840 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlberg1f444e42017-11-20 11:24:39 +11003841 unsigned int total_len;
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003842 int rc;
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003843
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003844 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
3845 (void **) &req, &total_len);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003846 if (rc)
3847 return rc;
3848
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003849 req->PersistentFileId = cpu_to_le64(persistent_fid);
3850 req->VolatileFileId = cpu_to_le64(volatile_fid);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003851
3852 iov[0].iov_base = (char *)req;
Ronnie Sahlberg1f444e42017-11-20 11:24:39 +11003853 iov[0].iov_len = total_len;
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003854
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003855 return 0;
3856}
3857
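/*
 * SMB2 FLUSH: ask the server to write out its cached data for the given
 * open handle, roughly the wire-level equivalent of an fsync on the handle.
 */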
3858int
3859SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3860 u64 volatile_fid)
3861{
3862 struct cifs_ses *ses = tcon->ses;
3863 struct smb_rqst rqst;
3864 struct kvec iov[1];
3865 struct kvec rsp_iov = {NULL, 0};
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003866 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003867 int resp_buftype = CIFS_NO_BUFFER;
3868 int flags = 0;
3869 int rc = 0;
3870
3871 cifs_dbg(FYI, "flush\n");
3872 if (!ses || !(ses->server))
3873 return -EIO;
3874
3875 if (smb3_encryption_required(tcon))
3876 flags |= CIFS_TRANSFORM_REQ;
3877
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003878 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003879 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003880 rqst.rq_iov = iov;
3881 rqst.rq_nvec = 1;
3882
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003883 rc = SMB2_flush_init(xid, &rqst, tcon, server,
3884 persistent_fid, volatile_fid);
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003885 if (rc)
3886 goto flush_exit;
3887
Steve Frenchf90f9792019-09-03 18:35:42 -05003888 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003889 rc = cifs_send_recv(xid, ses, server,
3890 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003891
Steve Frencheccb4422018-05-17 21:16:55 -05003892 if (rc != 0) {
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003893 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003894 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
3895 rc);
Steve Frenchf90f9792019-09-03 18:35:42 -05003896 } else
3897 trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
3898 ses->Suid);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003899
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003900 flush_exit:
3901 SMB2_flush_free(&rqst);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003902 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003903 return rc;
3904}
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003905
3906/*
3907 * To form a chain of read requests, any read requests after the first should
3908 * have the end_of_chain boolean set to true.
3909 */
3910static int
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003911smb2_new_read_req(void **buf, unsigned int *total_len,
Long Li2dabfd52017-11-07 01:54:53 -07003912 struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
3913 unsigned int remaining_bytes, int request_type)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003914{
3915 int rc = -EACCES;
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003916 struct smb2_read_req *req = NULL;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003917 struct smb2_hdr *shdr;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003918 struct TCP_Server_Info *server = io_parms->server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003919
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003920 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
3921 (void **) &req, total_len);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003922 if (rc)
3923 return rc;
Long Li2dabfd52017-11-07 01:54:53 -07003924
Long Li2dabfd52017-11-07 01:54:53 -07003925 if (server == NULL)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003926 return -ECONNABORTED;
3927
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003928 shdr = &req->hdr;
3929 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003930
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003931 req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
3932 req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003933 req->ReadChannelInfoOffset = 0; /* reserved */
3934 req->ReadChannelInfoLength = 0; /* reserved */
3935 req->Channel = 0; /* reserved */
3936 req->MinimumCount = 0;
3937 req->Length = cpu_to_le32(io_parms->length);
3938 req->Offset = cpu_to_le64(io_parms->offset);
Steve Frenchd323c2462019-02-25 00:52:43 -06003939
3940 trace_smb3_read_enter(0 /* xid */,
3941 io_parms->persistent_fid,
3942 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
3943 io_parms->offset, io_parms->length);
Long Libd3dcc62017-11-22 17:38:47 -07003944#ifdef CONFIG_CIFS_SMB_DIRECT
3945 /*
3946	 * If we want to do an RDMA write, fill in and append
3947	 * smbd_buffer_descriptor_v1 to the end of the read request
3948 */
Long Libb4c0412018-04-17 12:17:08 -07003949 if (server->rdma && rdata && !server->sign &&
Long Libd3dcc62017-11-22 17:38:47 -07003950 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003951
Long Libd3dcc62017-11-22 17:38:47 -07003952 struct smbd_buffer_descriptor_v1 *v1;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003953 bool need_invalidate = server->dialect == SMB30_PROT_ID;
Long Libd3dcc62017-11-22 17:38:47 -07003954
3955 rdata->mr = smbd_register_mr(
3956 server->smbd_conn, rdata->pages,
Long Li7cf20bc2018-05-30 12:48:02 -07003957 rdata->nr_pages, rdata->page_offset,
3958 rdata->tailsz, true, need_invalidate);
Long Libd3dcc62017-11-22 17:38:47 -07003959 if (!rdata->mr)
Long Lib7972092019-04-05 21:36:34 +00003960 return -EAGAIN;
Long Libd3dcc62017-11-22 17:38:47 -07003961
3962 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
3963 if (need_invalidate)
3964 req->Channel = SMB2_CHANNEL_RDMA_V1;
3965 req->ReadChannelInfoOffset =
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003966 cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
Long Libd3dcc62017-11-22 17:38:47 -07003967 req->ReadChannelInfoLength =
Steve French2026b062018-01-24 23:07:41 -06003968 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
Long Libd3dcc62017-11-22 17:38:47 -07003969 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
Steve French2026b062018-01-24 23:07:41 -06003970 v1->offset = cpu_to_le64(rdata->mr->mr->iova);
3971 v1->token = cpu_to_le32(rdata->mr->mr->rkey);
3972 v1->length = cpu_to_le32(rdata->mr->mr->length);
Long Libd3dcc62017-11-22 17:38:47 -07003973
3974 *total_len += sizeof(*v1) - 1;
3975 }
3976#endif
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003977 if (request_type & CHAINED_REQUEST) {
3978 if (!(request_type & END_OF_CHAIN)) {
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08003979 /* next 8-byte aligned request */
3980 *total_len = DIV_ROUND_UP(*total_len, 8) * 8;
3981 shdr->NextCommand = cpu_to_le32(*total_len);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003982 } else /* END_OF_CHAIN */
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003983 shdr->NextCommand = 0;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003984 if (request_type & RELATED_REQUEST) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003985 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003986 /*
3987 * Related requests use info from previous read request
3988 * in chain.
3989 */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003990 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
3991 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003992 req->PersistentFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
3993 req->VolatileFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003994 }
3995 }
3996 if (remaining_bytes > io_parms->length)
3997 req->RemainingBytes = cpu_to_le32(remaining_bytes);
3998 else
3999 req->RemainingBytes = 0;
4000
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004001 *buf = req;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004002 return rc;
4003}
4004
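/*
 * Completion callback for async reads sent by smb2_async_readv(): verify the
 * signature when required, account for the bytes received, release any
 * SMB direct memory registration, then queue rdata->work and return the
 * credits granted by the response.
 */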
4005static void
4006smb2_readv_callback(struct mid_q_entry *mid)
4007{
4008 struct cifs_readdata *rdata = mid->callback_data;
4009 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004010 struct TCP_Server_Info *server = rdata->server;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004011 struct smb2_hdr *shdr =
4012 (struct smb2_hdr *)rdata->iov[0].iov_base;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004013 struct cifs_credits credits = { .value = 0, .instance = 0 };
Steve French46f17d12019-09-04 23:07:52 -05004014 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
4015 .rq_nvec = 1,
Jeff Layton8321fec2012-09-19 06:22:32 -07004016 .rq_pages = rdata->pages,
Long Li1dbe3462018-05-30 12:47:55 -07004017 .rq_offset = rdata->page_offset,
Jeff Layton8321fec2012-09-19 06:22:32 -07004018 .rq_npages = rdata->nr_pages,
4019 .rq_pagesz = rdata->pagesz,
4020 .rq_tailsz = rdata->tailsz };
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004021
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004022 WARN_ONCE(rdata->server != mid->server,
4023 "rdata server %p != mid server %p",
4024 rdata->server, mid->server);
4025
Joe Perchesf96637b2013-05-04 22:12:25 -05004026 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
4027 __func__, mid->mid, mid->mid_state, rdata->result,
4028 rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004029
4030 switch (mid->mid_state) {
4031 case MID_RESPONSE_RECEIVED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004032 credits.value = le16_to_cpu(shdr->CreditRequest);
4033 credits.instance = server->reconnect_instance;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004034 /* result already set, check signature */
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004035 if (server->sign && !mid->decrypted) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07004036 int rc;
4037
Jeff Layton0b688cf2012-09-18 16:20:34 -07004038 rc = smb2_verify_signature(&rqst, server);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07004039 if (rc)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004040 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05004041 rc);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07004042 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004043 /* FIXME: should this be counted toward the initiating task? */
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04004044 task_io_account_read(rdata->got_bytes);
4045 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004046 break;
4047 case MID_REQUEST_SUBMITTED:
4048 case MID_RETRY_NEEDED:
4049 rdata->result = -EAGAIN;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04004050 if (server->sign && rdata->got_bytes)
4051			/* reset the byte count since we cannot check the signature */
4052 rdata->got_bytes = 0;
4053 /* FIXME: should this be counted toward the initiating task? */
4054 task_io_account_read(rdata->got_bytes);
4055 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004056 break;
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08004057 case MID_RESPONSE_MALFORMED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004058 credits.value = le16_to_cpu(shdr->CreditRequest);
4059 credits.instance = server->reconnect_instance;
Miaohe Lin30b5ae22020-08-08 16:36:37 +08004060 fallthrough;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004061 default:
Pavel Shilovsky6b15eb12019-01-18 15:46:14 -08004062 rdata->result = -EIO;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004063 }
Long Libd3dcc62017-11-22 17:38:47 -07004064#ifdef CONFIG_CIFS_SMB_DIRECT
4065 /*
4066	 * If this rdata has a memory registered, the MR can be freed.
4067	 * The MR needs to be freed as soon as I/O finishes to prevent deadlock,
4068	 * because MRs are limited in number and are reused for future I/Os.
4069 */
4070 if (rdata->mr) {
4071 smbd_deregister_mr(rdata->mr);
4072 rdata->mr = NULL;
4073 }
4074#endif
Pavel Shilovsky082aaa82019-01-18 15:54:34 -08004075 if (rdata->result && rdata->result != -ENODATA) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004076 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004077 trace_smb3_read_err(0 /* xid */,
4078 rdata->cfile->fid.persistent_fid,
4079 tcon->tid, tcon->ses->Suid, rdata->offset,
4080 rdata->bytes, rdata->result);
4081 } else
4082 trace_smb3_read_done(0 /* xid */,
4083 rdata->cfile->fid.persistent_fid,
4084 tcon->tid, tcon->ses->Suid,
4085 rdata->offset, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004086
4087 queue_work(cifsiod_wq, &rdata->work);
4088 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004089 add_credits(server, &credits, 0);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004090}
4091
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004092/* smb2_async_readv - send an async read, and set up mid to handle result */
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004093int
4094smb2_async_readv(struct cifs_readdata *rdata)
4095{
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004096 int rc, flags = 0;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004097 char *buf;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004098 struct smb2_hdr *shdr;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004099 struct cifs_io_parms io_parms;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004100 struct smb_rqst rqst = { .rq_iov = rdata->iov,
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004101 .rq_nvec = 1 };
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004102 struct TCP_Server_Info *server;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004103 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004104 unsigned int total_len;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004105
Joe Perchesf96637b2013-05-04 22:12:25 -05004106 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
4107 __func__, rdata->offset, rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004108
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004109 if (!rdata->server)
4110 rdata->server = cifs_pick_channel(tcon->ses);
4111
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004112 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004113 io_parms.server = server = rdata->server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004114 io_parms.offset = rdata->offset;
4115 io_parms.length = rdata->bytes;
4116 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
4117 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
4118 io_parms.pid = rdata->pid;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004119
Long Li2dabfd52017-11-07 01:54:53 -07004120 rc = smb2_new_read_req(
4121 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
Pavel Shilovskyf0b93cb2019-01-25 11:10:00 -08004122 if (rc)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004123 return rc;
4124
Steve French5a77e752018-05-09 17:43:08 -05004125 if (smb3_encryption_required(io_parms.tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004126 flags |= CIFS_TRANSFORM_REQ;
4127
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004128 rdata->iov[0].iov_base = buf;
4129 rdata->iov[0].iov_len = total_len;
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08004130
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004131 shdr = (struct smb2_hdr *)buf;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004132
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004133 if (rdata->credits.value > 0) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004134 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004135 SMB2_MAX_BUFFER_SIZE));
Aurelien Aptel88fd98a2021-03-04 17:51:48 +00004136 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004137
4138 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4139 if (rc)
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004140 goto async_readv_out;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004141
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004142 flags |= CIFS_HAS_CREDITS;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004143 }
4144
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004145 kref_get(&rdata->refcount);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004146 rc = cifs_call_async(server, &rqst,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004147 cifs_readv_receive, smb2_readv_callback,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08004148 smb3_handle_read_data, rdata, flags,
4149 &rdata->credits);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004150 if (rc) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004151 kref_put(&rdata->refcount, cifs_readdata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004152 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004153 trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
4154 io_parms.tcon->tid,
4155 io_parms.tcon->ses->Suid,
4156 io_parms.offset, io_parms.length, rc);
4157 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004158
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004159async_readv_out:
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004160 cifs_small_buf_release(buf);
4161 return rc;
4162}
Pavel Shilovsky33319142012-09-18 16:20:29 -07004163
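/*
 * Synchronous read: send a single SMB2 READ for the range described by
 * io_parms and either copy the data into *buf or hand back the response
 * buffer (setting *buf_type) for the caller to free.
 */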
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004164int
4165SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4166 unsigned int *nbytes, char **buf, int *buf_type)
4167{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004168 struct smb_rqst rqst;
Colin Ian King1efd4fc2019-07-31 10:05:26 +01004169 int resp_buftype, rc;
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004170 struct smb2_read_req *req = NULL;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004171 struct smb2_read_rsp *rsp = NULL;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004172 struct kvec iov[1];
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004173 struct kvec rsp_iov;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004174 unsigned int total_len;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004175 int flags = CIFS_LOG_ERROR;
4176 struct cifs_ses *ses = io_parms->tcon->ses;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004177
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004178 if (!io_parms->server)
4179 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4180
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004181 *nbytes = 0;
Long Li2dabfd52017-11-07 01:54:53 -07004182 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004183 if (rc)
4184 return rc;
4185
Steve French5a77e752018-05-09 17:43:08 -05004186 if (smb3_encryption_required(io_parms->tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004187 flags |= CIFS_TRANSFORM_REQ;
4188
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004189 iov[0].iov_base = (char *)req;
4190 iov[0].iov_len = total_len;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004191
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004192 memset(&rqst, 0, sizeof(struct smb_rqst));
4193 rqst.rq_iov = iov;
4194 rqst.rq_nvec = 1;
4195
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004196 rc = cifs_send_recv(xid, ses, io_parms->server,
4197 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004198 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004199
4200 if (rc) {
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004201 if (rc != -ENODATA) {
4202 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4203 cifs_dbg(VFS, "Send error in read = %d\n", rc);
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004204 trace_smb3_read_err(xid,
4205 le64_to_cpu(req->PersistentFileId),
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004206 io_parms->tcon->tid, ses->Suid,
4207 io_parms->offset, io_parms->length,
4208 rc);
Steve Frenchb0a42f22019-02-25 15:02:58 -06004209 } else
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004210 trace_smb3_read_done(xid,
4211 le64_to_cpu(req->PersistentFileId),
4212 io_parms->tcon->tid, ses->Suid,
4213 io_parms->offset, 0);
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004214 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Ronnie Sahlberg05fd5c22019-04-23 16:39:45 +10004215 cifs_small_buf_release(req);
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004216 return rc == -ENODATA ? 0 : rc;
Steve Frencheccb4422018-05-17 21:16:55 -05004217 } else
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004218 trace_smb3_read_done(xid,
4219 le64_to_cpu(req->PersistentFileId),
Steve Frencheccb4422018-05-17 21:16:55 -05004220 io_parms->tcon->tid, ses->Suid,
4221 io_parms->offset, io_parms->length);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004222
ZhangXiaoxu088aaf12019-04-06 15:47:39 +08004223 cifs_small_buf_release(req);
4224
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004225 *nbytes = le32_to_cpu(rsp->DataLength);
4226 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4227 (*nbytes > io_parms->length)) {
4228 cifs_dbg(FYI, "bad length %d for count %d\n",
4229 *nbytes, io_parms->length);
4230 rc = -EIO;
4231 *nbytes = 0;
4232 }
4233
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004234 if (*buf) {
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004235 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004236 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004237 } else if (resp_buftype != CIFS_NO_BUFFER) {
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004238 *buf = rsp_iov.iov_base;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004239 if (resp_buftype == CIFS_SMALL_BUFFER)
4240 *buf_type = CIFS_SMALL_BUFFER;
4241 else if (resp_buftype == CIFS_LARGE_BUFFER)
4242 *buf_type = CIFS_LARGE_BUFFER;
4243 }
4244 return rc;
4245}
4246
Pavel Shilovsky33319142012-09-18 16:20:29 -07004247/*
4248 * Check the mid_state and signature on received buffer (if any), and queue the
4249 * workqueue completion task.
4250 */
4251static void
4252smb2_writev_callback(struct mid_q_entry *mid)
4253{
4254 struct cifs_writedata *wdata = mid->callback_data;
4255 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004256 struct TCP_Server_Info *server = wdata->server;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004257 unsigned int written;
4258 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004259 struct cifs_credits credits = { .value = 0, .instance = 0 };
Pavel Shilovsky33319142012-09-18 16:20:29 -07004260
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004261 WARN_ONCE(wdata->server != mid->server,
4262 "wdata server %p != mid server %p",
4263 wdata->server, mid->server);
4264
Pavel Shilovsky33319142012-09-18 16:20:29 -07004265 switch (mid->mid_state) {
4266 case MID_RESPONSE_RECEIVED:
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004267 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004268 credits.instance = server->reconnect_instance;
4269 wdata->result = smb2_check_receive(mid, server, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004270 if (wdata->result != 0)
4271 break;
4272
4273 written = le32_to_cpu(rsp->DataLength);
4274 /*
4275 * Mask off high 16 bits when bytes written as returned
4276 * by the server is greater than bytes requested by the
4277 * client. OS/2 servers are known to set incorrect
4278 * CountHigh values.
4279 */
4280 if (written > wdata->bytes)
4281 written &= 0xFFFF;
4282
4283 if (written < wdata->bytes)
4284 wdata->result = -ENOSPC;
4285 else
4286 wdata->bytes = written;
4287 break;
4288 case MID_REQUEST_SUBMITTED:
4289 case MID_RETRY_NEEDED:
4290 wdata->result = -EAGAIN;
4291 break;
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08004292 case MID_RESPONSE_MALFORMED:
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004293 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004294 credits.instance = server->reconnect_instance;
Miaohe Lin30b5ae22020-08-08 16:36:37 +08004295 fallthrough;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004296 default:
4297 wdata->result = -EIO;
4298 break;
4299 }
Long Lidb223a52017-11-22 17:38:45 -07004300#ifdef CONFIG_CIFS_SMB_DIRECT
4301 /*
4302	 * If this wdata has a memory registered, the MR can be freed.
4303	 * The number of MRs available is limited, so it is important to recover
4304	 * a used MR as soon as the I/O is finished.  Holding an MR longer in
4305	 * the later I/O path can result in an I/O deadlock because no MR is
4306	 * left with which to send the request on an I/O retry.
4307 */
4308 if (wdata->mr) {
4309 smbd_deregister_mr(wdata->mr);
4310 wdata->mr = NULL;
4311 }
4312#endif
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004313 if (wdata->result) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07004314 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004315 trace_smb3_write_err(0 /* no xid */,
4316 wdata->cfile->fid.persistent_fid,
4317 tcon->tid, tcon->ses->Suid, wdata->offset,
4318 wdata->bytes, wdata->result);
Steve Frenchd6fd4192020-02-05 16:52:11 -06004319 if (wdata->result == -ENOSPC)
Joe Perchesa0a30362020-04-14 22:42:53 -07004320 pr_warn_once("Out of space writing to %s\n",
4321 tcon->treeName);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004322 } else
4323 trace_smb3_write_done(0 /* no xid */,
4324 wdata->cfile->fid.persistent_fid,
4325 tcon->tid, tcon->ses->Suid,
4326 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004327
4328 queue_work(cifsiod_wq, &wdata->work);
4329 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004330 add_credits(server, &credits, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004331}
4332
4333/* smb2_async_writev - send an async write, and set up mid to handle result */
4334int
Steve French4a5c80d2014-02-07 20:45:12 -06004335smb2_async_writev(struct cifs_writedata *wdata,
4336 void (*release)(struct kref *kref))
Pavel Shilovsky33319142012-09-18 16:20:29 -07004337{
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004338 int rc = -EACCES, flags = 0;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004339 struct smb2_write_req *req = NULL;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004340 struct smb2_hdr *shdr;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004341 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004342 struct TCP_Server_Info *server = wdata->server;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004343 struct kvec iov[1];
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004344 struct smb_rqst rqst = { };
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004345 unsigned int total_len;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004346
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004347 if (!wdata->server)
4348 server = wdata->server = cifs_pick_channel(tcon->ses);
4349
4350 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
4351 (void **) &req, &total_len);
Pavel Shilovskyf0b93cb2019-01-25 11:10:00 -08004352 if (rc)
4353 return rc;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004354
Steve French5a77e752018-05-09 17:43:08 -05004355 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004356 flags |= CIFS_TRANSFORM_REQ;
4357
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004358 shdr = (struct smb2_hdr *)req;
4359 shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004360
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004361 req->PersistentFileId = cpu_to_le64(wdata->cfile->fid.persistent_fid);
4362 req->VolatileFileId = cpu_to_le64(wdata->cfile->fid.volatile_fid);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004363 req->WriteChannelInfoOffset = 0;
4364 req->WriteChannelInfoLength = 0;
4365 req->Channel = 0;
4366 req->Offset = cpu_to_le64(wdata->offset);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004367 req->DataOffset = cpu_to_le16(
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004368 offsetof(struct smb2_write_req, Buffer));
Pavel Shilovsky33319142012-09-18 16:20:29 -07004369 req->RemainingBytes = 0;
Steve Frenchd323c2462019-02-25 00:52:43 -06004370
4371 trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
4372 tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
Long Lidb223a52017-11-22 17:38:45 -07004373#ifdef CONFIG_CIFS_SMB_DIRECT
4374 /*
4375 * If we want to do a server RDMA read, fill in and append
4376	 * smbd_buffer_descriptor_v1 to the end of the write request
4377 */
Long Libb4c0412018-04-17 12:17:08 -07004378 if (server->rdma && !server->sign && wdata->bytes >=
Long Lidb223a52017-11-22 17:38:45 -07004379 server->smbd_conn->rdma_readwrite_threshold) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07004380
Long Lidb223a52017-11-22 17:38:45 -07004381 struct smbd_buffer_descriptor_v1 *v1;
4382 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4383
4384 wdata->mr = smbd_register_mr(
4385 server->smbd_conn, wdata->pages,
Long Li7cf20bc2018-05-30 12:48:02 -07004386 wdata->nr_pages, wdata->page_offset,
4387 wdata->tailsz, false, need_invalidate);
Long Lidb223a52017-11-22 17:38:45 -07004388 if (!wdata->mr) {
Long Lib7972092019-04-05 21:36:34 +00004389 rc = -EAGAIN;
Long Lidb223a52017-11-22 17:38:45 -07004390 goto async_writev_out;
4391 }
4392 req->Length = 0;
4393 req->DataOffset = 0;
Long Li7cf20bc2018-05-30 12:48:02 -07004394 if (wdata->nr_pages > 1)
4395 req->RemainingBytes =
4396 cpu_to_le32(
4397 (wdata->nr_pages - 1) * wdata->pagesz -
4398 wdata->page_offset + wdata->tailsz
4399 );
4400 else
4401 req->RemainingBytes = cpu_to_le32(wdata->tailsz);
Long Lidb223a52017-11-22 17:38:45 -07004402 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4403 if (need_invalidate)
4404 req->Channel = SMB2_CHANNEL_RDMA_V1;
4405 req->WriteChannelInfoOffset =
Steve French2026b062018-01-24 23:07:41 -06004406 cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
Long Lidb223a52017-11-22 17:38:45 -07004407 req->WriteChannelInfoLength =
Steve French2026b062018-01-24 23:07:41 -06004408 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
Long Lidb223a52017-11-22 17:38:45 -07004409 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
Steve French2026b062018-01-24 23:07:41 -06004410 v1->offset = cpu_to_le64(wdata->mr->mr->iova);
4411 v1->token = cpu_to_le32(wdata->mr->mr->rkey);
4412 v1->length = cpu_to_le32(wdata->mr->mr->length);
Long Lidb223a52017-11-22 17:38:45 -07004413 }
4414#endif
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004415 iov[0].iov_len = total_len - 1;
4416 iov[0].iov_base = (char *)req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004417
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004418 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004419 rqst.rq_nvec = 1;
Jeff Laytoneddb0792012-09-18 16:20:35 -07004420 rqst.rq_pages = wdata->pages;
Long Li57a929a2018-05-30 12:47:53 -07004421 rqst.rq_offset = wdata->page_offset;
Jeff Laytoneddb0792012-09-18 16:20:35 -07004422 rqst.rq_npages = wdata->nr_pages;
4423 rqst.rq_pagesz = wdata->pagesz;
4424 rqst.rq_tailsz = wdata->tailsz;
Long Lidb223a52017-11-22 17:38:45 -07004425#ifdef CONFIG_CIFS_SMB_DIRECT
4426 if (wdata->mr) {
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004427 iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
Long Lidb223a52017-11-22 17:38:45 -07004428 rqst.rq_npages = 0;
4429 }
4430#endif
Joe Perchesf96637b2013-05-04 22:12:25 -05004431 cifs_dbg(FYI, "async write at %llu %u bytes\n",
4432 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004433
Long Lidb223a52017-11-22 17:38:45 -07004434#ifdef CONFIG_CIFS_SMB_DIRECT
4435 /* For RDMA read, I/O size is in RemainingBytes not in Length */
4436 if (!wdata->mr)
4437 req->Length = cpu_to_le32(wdata->bytes);
4438#else
Pavel Shilovsky33319142012-09-18 16:20:29 -07004439 req->Length = cpu_to_le32(wdata->bytes);
Long Lidb223a52017-11-22 17:38:45 -07004440#endif
Pavel Shilovsky33319142012-09-18 16:20:29 -07004441
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004442 if (wdata->credits.value > 0) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004443 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004444 SMB2_MAX_BUFFER_SIZE));
Aurelien Aptel88fd98a2021-03-04 17:51:48 +00004445 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004446
4447 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
4448 if (rc)
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004449 goto async_writev_out;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004450
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004451 flags |= CIFS_HAS_CREDITS;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004452 }
4453
Pavel Shilovsky33319142012-09-18 16:20:29 -07004454 kref_get(&wdata->refcount);
Pavel Shilovsky9b7c18a2016-11-16 14:06:17 -08004455 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08004456 wdata, flags, &wdata->credits);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004457
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004458 if (rc) {
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004459 trace_smb3_write_err(0 /* no xid */,
4460 le64_to_cpu(req->PersistentFileId),
Steve Frencheccb4422018-05-17 21:16:55 -05004461 tcon->tid, tcon->ses->Suid, wdata->offset,
4462 wdata->bytes, rc);
Steve French4a5c80d2014-02-07 20:45:12 -06004463 kref_put(&wdata->refcount, release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004464 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004465 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07004466
Pavel Shilovsky33319142012-09-18 16:20:29 -07004467async_writev_out:
4468 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004469 return rc;
4470}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004471
4472/*
4473	 * SMB2_write is passed an iov array containing n_vec elements of data to
4474	 * write.  n_vec must be at least 1, and the data begins at position 1 in
4475	 * the iov array; element 0 is reserved for the request header, which is
4476	 * filled in below.  The total data length is given by io_parms->length.
4477 */
4478int
4479SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
4480 unsigned int *nbytes, struct kvec *iov, int n_vec)
4481{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004482 struct smb_rqst rqst;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004483 int rc = 0;
4484 struct smb2_write_req *req = NULL;
4485 struct smb2_write_rsp *rsp = NULL;
4486 int resp_buftype;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004487 struct kvec rsp_iov;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004488 int flags = 0;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004489 unsigned int total_len;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004490 struct TCP_Server_Info *server;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004491
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004492 *nbytes = 0;
4493
4494 if (n_vec < 1)
4495 return rc;
4496
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004497 if (!io_parms->server)
4498 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4499 server = io_parms->server;
4500 if (server == NULL)
4501 return -ECONNABORTED;
4502
4503 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
4504 (void **) &req, &total_len);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004505 if (rc)
4506 return rc;
4507
Steve French5a77e752018-05-09 17:43:08 -05004508 if (smb3_encryption_required(io_parms->tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004509 flags |= CIFS_TRANSFORM_REQ;
4510
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004511 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004512
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004513 req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
4514 req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004515 req->WriteChannelInfoOffset = 0;
4516 req->WriteChannelInfoLength = 0;
4517 req->Channel = 0;
4518 req->Length = cpu_to_le32(io_parms->length);
4519 req->Offset = cpu_to_le64(io_parms->offset);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004520 req->DataOffset = cpu_to_le16(
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004521 offsetof(struct smb2_write_req, Buffer));
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004522 req->RemainingBytes = 0;
4523
Steve Frenchd323c2462019-02-25 00:52:43 -06004524 trace_smb3_write_enter(xid, io_parms->persistent_fid,
4525 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4526 io_parms->offset, io_parms->length);
4527
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004528 iov[0].iov_base = (char *)req;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004529 /* 1 for Buffer */
4530 iov[0].iov_len = total_len - 1;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004531
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004532 memset(&rqst, 0, sizeof(struct smb_rqst));
4533 rqst.rq_iov = iov;
4534 rqst.rq_nvec = n_vec + 1;
4535
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004536 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
4537 &rqst,
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004538 &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004539 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004540
4541 if (rc) {
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004542 trace_smb3_write_err(xid,
4543 le64_to_cpu(req->PersistentFileId),
Steve Frencheccb4422018-05-17 21:16:55 -05004544 io_parms->tcon->tid,
4545 io_parms->tcon->ses->Suid,
4546 io_parms->offset, io_parms->length, rc);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004547 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05004548 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Steve Frencheccb4422018-05-17 21:16:55 -05004549 } else {
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004550 *nbytes = le32_to_cpu(rsp->DataLength);
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004551 trace_smb3_write_done(xid,
4552 le64_to_cpu(req->PersistentFileId),
4553 io_parms->tcon->tid,
4554 io_parms->tcon->ses->Suid,
4555 io_parms->offset, *nbytes);
Steve Frencheccb4422018-05-17 21:16:55 -05004556 }
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004557
ZhangXiaoxu6a3eb332019-04-06 15:47:38 +08004558 cifs_small_buf_release(req);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004559 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004560 return rc;
4561}
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004562
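/*
 * Return the on-the-wire size of a SID found at "beg" in a POSIX info
 * buffer: revision byte, subauthority count byte, 6-byte authority and
 * 4 bytes per subauthority.  Returns -1 if the SID is malformed or would
 * run past "end".
 */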
Aurelien Aptel69dda302020-03-02 17:53:22 +01004563int posix_info_sid_size(const void *beg, const void *end)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004564{
4565 size_t subauth;
4566 int total;
4567
4568 if (beg + 1 > end)
4569 return -1;
4570
4571 subauth = *(u8 *)(beg+1);
4572 if (subauth < 1 || subauth > 15)
4573 return -1;
4574
4575 total = 1 + 1 + 6 + 4*subauth;
4576 if (beg + total > end)
4577 return -1;
4578
4579 return total;
4580}
4581
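/*
 * Parse one variable-length smb2_posix_info entry in [beg, end): the fixed
 * header, owner SID, group SID, a 4-byte name length and then the name.
 * Fills in *out when non-NULL and returns the total size of the entry, or
 * -1 if the entry is malformed or truncated.
 */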
4582int posix_info_parse(const void *beg, const void *end,
4583 struct smb2_posix_info_parsed *out)
4584
4585{
4586 int total_len = 0;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004587 int owner_len, group_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004588 int name_len;
4589 const void *owner_sid;
4590 const void *group_sid;
4591 const void *name;
4592
4593 /* if no end bound given, assume payload to be correct */
4594 if (!end) {
4595 const struct smb2_posix_info *p = beg;
4596
4597 end = beg + le32_to_cpu(p->NextEntryOffset);
4598 /* last element will have a 0 offset, pick a sensible bound */
4599 if (end == beg)
4600 end += 0xFFFF;
4601 }
4602
4603 /* check base buf */
4604 if (beg + sizeof(struct smb2_posix_info) > end)
4605 return -1;
4606 total_len = sizeof(struct smb2_posix_info);
4607
4608 /* check owner sid */
4609 owner_sid = beg + total_len;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004610 owner_len = posix_info_sid_size(owner_sid, end);
4611 if (owner_len < 0)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004612 return -1;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004613 total_len += owner_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004614
4615 /* check group sid */
4616 group_sid = beg + total_len;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004617 group_len = posix_info_sid_size(group_sid, end);
4618 if (group_len < 0)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004619 return -1;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004620 total_len += group_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004621
4622 /* check name len */
4623 if (beg + total_len + 4 > end)
4624 return -1;
4625 name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
4626 if (name_len < 1 || name_len > 0xFFFF)
4627 return -1;
4628 total_len += 4;
4629
4630 /* check name */
4631 name = beg + total_len;
4632 if (name + name_len > end)
4633 return -1;
4634 total_len += name_len;
4635
4636 if (out) {
4637 out->base = beg;
4638 out->size = total_len;
4639 out->name_len = name_len;
4640 out->name = name;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004641 memcpy(&out->owner, owner_sid, owner_len);
4642 memcpy(&out->group, group_sid, group_len);
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004643 }
4644 return total_len;
4645}
4646
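/*
 * Size of the variable-length tail (SIDs and name) that follows the fixed
 * smb2_posix_info header, or -1 on a parse error.
 */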
4647static int posix_info_extra_size(const void *beg, const void *end)
4648{
4649 int len = posix_info_parse(beg, end, NULL);
4650
4651 if (len < 0)
4652 return -1;
4653 return len - sizeof(struct smb2_posix_info);
4654}
4655
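/*
 * Walk the entries in a QUERY_DIRECTORY response buffer and count them,
 * stopping at the first entry that would overflow the buffer.  On return,
 * *lastentry points at the last complete entry.
 */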
static unsigned int
num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
	    size_t size)
{
	int len;
	unsigned int entrycount = 0;
	unsigned int next_offset = 0;
	char *entryptr;
	FILE_DIRECTORY_INFO *dir_info;

	if (bufstart == NULL)
		return 0;

	entryptr = bufstart;

	while (1) {
		if (entryptr + next_offset < entryptr ||
		    entryptr + next_offset > end_of_buf ||
		    entryptr + next_offset + size > end_of_buf) {
			cifs_dbg(VFS, "malformed search entry would overflow\n");
			break;
		}

		entryptr = entryptr + next_offset;
		dir_info = (FILE_DIRECTORY_INFO *)entryptr;

		if (infotype == SMB_FIND_FILE_POSIX_INFO)
			len = posix_info_extra_size(entryptr, end_of_buf);
		else
			len = le32_to_cpu(dir_info->FileNameLength);

		if (len < 0 ||
		    entryptr + len < entryptr ||
		    entryptr + len > end_of_buf ||
		    entryptr + len + size > end_of_buf) {
			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
				 end_of_buf);
			break;
		}

		*lastentry = entryptr;
		entrycount++;

		next_offset = le32_to_cpu(dir_info->NextEntryOffset);
		if (!next_offset)
			break;
	}

	return entrycount;
}

/*
 * Readdir/FindFirst
 */
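/*
 * Build, but do not send, an SMB2 query directory request for the given open
 * directory handle into @rqst.  The wildcard pattern "*" is always used; the
 * caller sends the request and must release it with
 * SMB2_query_directory_free().
 */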
int SMB2_query_directory_init(const unsigned int xid,
			      struct cifs_tcon *tcon,
			      struct TCP_Server_Info *server,
			      struct smb_rqst *rqst,
			      u64 persistent_fid, u64 volatile_fid,
			      int index, int info_level)
{
	struct smb2_query_directory_req *req;
	unsigned char *bufptr;
	__le16 asterisk = cpu_to_le16('*');
	unsigned int output_size = CIFSMaxBufSize -
		MAX_SMB2_CREATE_RESPONSE_SIZE -
		MAX_SMB2_CLOSE_RESPONSE_SIZE;
	unsigned int total_len;
	struct kvec *iov = rqst->rq_iov;
	int len, rc;

	rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	switch (info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		break;
	case SMB_FIND_FILE_POSIX_INFO:
		req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
		break;
	default:
		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
			      info_level);
		return -EINVAL;
	}

	req->FileIndex = cpu_to_le32(index);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	len = 0x2;
	bufptr = req->Buffer;
	memcpy(bufptr, &asterisk, len);

	req->FileNameOffset =
		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
	req->FileNameLength = cpu_to_le16(len);
	/*
	 * BB could be 30 bytes or so longer if we used SMB2 specific
	 * buffer lengths, but this is safe and close enough.
	 */
	output_size = min_t(unsigned int, output_size, server->maxBuf);
	output_size = min_t(unsigned int, output_size, 2 << 15);
	req->OutputBufferLength = cpu_to_le32(output_size);

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	iov[1].iov_base = (char *)(req->Buffer);
	iov[1].iov_len = len;

	trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
			tcon->ses->Suid, index, output_size);

	return 0;
}

void SMB2_query_directory_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov) {
		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
	}
}

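/*
 * Parse a query directory response: validate the output buffer, attach the
 * response buffer to @srch_inf (releasing any previous one) and count the
 * entries it contains.  On success the response buffer is owned by
 * @srch_inf and must not be freed by the caller.
 */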
int
smb2_parse_query_directory(struct cifs_tcon *tcon,
			   struct kvec *rsp_iov,
			   int resp_buftype,
			   struct cifs_search_info *srch_inf)
{
	struct smb2_query_directory_rsp *rsp;
	size_t info_buf_size;
	char *end_of_smb;
	int rc;

	rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;

	switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	case SMB_FIND_FILE_POSIX_INFO:
		/* note that posix payloads are variable size */
		info_buf_size = sizeof(struct smb2_posix_info);
		break;
	default:
		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
			      srch_inf->info_level);
		return -EINVAL;
	}

	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
			       info_buf_size);
	if (rc) {
		cifs_tcon_dbg(VFS, "bad info payload\n");
		return rc;
	}

	srch_inf->unicode = true;

	if (srch_inf->ntwrk_buf_start) {
		if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
	srch_inf->ntwrk_buf_start = (char *)rsp;
	srch_inf->srch_entries_start = srch_inf->last_entry =
		(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
	end_of_smb = rsp_iov->iov_len + (char *)rsp;

	srch_inf->entries_in_buffer = num_entries(
		srch_inf->info_level,
		srch_inf->srch_entries_start,
		end_of_smb,
		&srch_inf->last_entry,
		info_buf_size);

	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
		 srch_inf->srch_entries_start, srch_inf->last_entry);
	if (resp_buftype == CIFS_LARGE_BUFFER)
		srch_inf->smallBuf = false;
	else if (resp_buftype == CIFS_SMALL_BUFFER)
		srch_inf->smallBuf = true;
	else
		cifs_tcon_dbg(VFS, "Invalid search buffer type\n");

	return 0;
}

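/*
 * Send a single (non-compounded) query directory request and parse the
 * result.  STATUS_NO_MORE_FILES is not treated as an error; it simply
 * marks the end of the search.
 */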
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb_rqst rqst;
	struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	struct smb2_query_directory_rsp *rsp = NULL;
	int resp_buftype = CIFS_NO_BUFFER;
	struct kvec rsp_iov;
	int rc = 0;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = 0;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	rc = SMB2_query_directory_init(xid, tcon, server,
				       &rqst, persistent_fid,
				       volatile_fid, index,
				       srch_inf->info_level);
	if (rc)
		goto qdir_exit;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;

	if (rc) {
		if (rc == -ENODATA &&
		    rsp->hdr.Status == STATUS_NO_MORE_FILES) {
			trace_smb3_query_dir_done(xid, persistent_fid,
				tcon->tid, tcon->ses->Suid, index, 0);
			srch_inf->endOfSearch = true;
			rc = 0;
		} else {
			trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
				tcon->ses->Suid, index, 0, rc);
			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		}
		goto qdir_exit;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
			tcon->ses->Suid, index, 0, rc);
		goto qdir_exit;
	}
	resp_buftype = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
			tcon->ses->Suid, index, srch_inf->entries_in_buffer);

qdir_exit:
	SMB2_query_directory_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

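/*
 * Build an SMB2 SET_INFO request into @rqst.  The first data buffer is
 * copied inline after the request header; any additional buffers are passed
 * as further iovecs.  send_set_info() below shows the usual
 * init/send/free pattern.
 */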
int
SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
		   struct smb_rqst *rqst,
		   u64 persistent_fid, u64 volatile_fid, u32 pid,
		   u8 info_class, u8 info_type, u32 additional_info,
		   void **data, unsigned int *size)
{
	struct smb2_set_info_req *req;
	struct kvec *iov = rqst->rq_iov;
	unsigned int i, total_len;
	int rc;

	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->InfoType = info_type;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	req->AdditionalInformation = cpu_to_le32(additional_info);

	req->BufferOffset =
		cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
	req->BufferLength = cpu_to_le32(*size);

	memcpy(req->Buffer, *data, *size);
	total_len += *size;

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	for (i = 1; i < rqst->rq_nvec; i++) {
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	return 0;
}

void
SMB2_set_info_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov)
		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
}

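/*
 * Common helper for the SMB2_set_* wrappers below: build a SET_INFO request
 * for @num buffers, send it synchronously, and update the failure statistics
 * and tracepoints on error.
 */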
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
	      u8 info_type, u32 additional_info, unsigned int num,
	      void **data, unsigned int *size)
{
	struct smb_rqst rqst;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = 0;

	if (!ses || !server)
		return -EIO;

	if (!num)
		return -EINVAL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = num;

	rc = SMB2_set_info_init(tcon, server,
				&rqst, persistent_fid, volatile_fid, pid,
				info_class, info_type, additional_info,
				data, size);
	if (rc) {
		kfree(iov);
		return rc;
	}

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags,
			    &rsp_iov);
	SMB2_set_info_free(&rqst);
	rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
		trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
				ses->Suid, info_class, (__u32)info_type, rc);
	}

	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}

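/* Set the end-of-file position (FileEndOfFileInformation) for an open file */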
int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, __le64 *eof)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, &data, &size);
}

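/*
 * Set the security descriptor of an open file; @aclflag selects which parts
 * (owner, group, DACL, ...) the server should apply.
 */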
int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
	     u64 persistent_fid, u64 volatile_fid,
	     struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
			1, (void **)&pnntsd, &pacllen);
}

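/* Set extended attributes (FileFullEaInformation) on an open file */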
int
SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid,
	    struct smb2_file_full_ea_info *buf, int len)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, (void **)&buf, &len);
}

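/*
 * Acknowledge an oplock break: tell the server the oplock level the client
 * now holds on the given open file.  No response body is expected.
 */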
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_oplock_break *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	return rc;
}

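/* Convert an SMB2 FS_FULL_SIZE_INFORMATION reply into kstatfs fields */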
void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			     struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree  = kst->f_bavail =
			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}

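/* Convert an SMB3.1.1 POSIX filesystem info reply into kstatfs fields */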
static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
			      struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
	kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
		kst->f_bavail = kst->f_bfree;
	else
		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
	if (response_data->TotalFileNodes != cpu_to_le64(-1))
		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
	if (response_data->FreeFileNodes != cpu_to_le64(-1))
		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);

	return;
}

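/*
 * Build a QUERY_INFO request for filesystem-level (SMB2_O_INFO_FILESYSTEM)
 * information of the given class into a single iovec; the caller sends it
 * and releases the small buffer.
 */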
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
		   struct TCP_Server_Info *server,
		   int level, int outbuf_len, u64 persistent_fid,
		   u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;
	unsigned int total_len;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || server == NULL)
		return -EIO;

	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 1 for pad */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);

	iov->iov_base = (char *)req;
	iov->iov_len = total_len;
	return 0;
}

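/* Query SMB3.1.1 POSIX filesystem size information and fill in @fsdata */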
int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	FILE_SYSTEM_POSIX_INFO *info = NULL;
	int flags = 0;

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_POSIX_INFORMATION,
				sizeof(FILE_SYSTEM_POSIX_INFO),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto posix_qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (FILE_SYSTEM_POSIX_INFO *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(FILE_SYSTEM_POSIX_INFO));
	if (!rc)
		copy_posix_fs_info_to_kstatfs(info, fsdata);

posix_qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

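/* Query full filesystem size information and fill in @fsdata for statfs */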
int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	struct smb2_fs_full_size_info *info = NULL;
	int flags = 0;

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, fsdata);

qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

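/*
 * Query filesystem attribute, device, sector size or volume information
 * (selected by @level) and cache the result in the tcon.
 */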
int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	unsigned int rsp_len, offset;
	int flags = 0;

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else if (level == FS_VOLUME_INFORMATION) {
		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
		min_len = sizeof(struct smb3_fs_vol_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, server,
				level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, offset
			+ (char *)rsp, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, offset
			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(offset + (char *)rsp);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	} else if (level == FS_VOLUME_INFORMATION) {
		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
			(offset + (char *)rsp);
		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
		tcon->vol_create_time = vol_info->VolumeCreationTime;
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

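/*
 * Send an SMB2 LOCK request containing @num_lock lock elements for the given
 * open file.  The lock array is sent as a second iovec following the fixed
 * request header.
 */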
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	struct smb_rqst rqst;
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	struct kvec rsp_iov;
	int resp_buf_type;
	unsigned int count;
	int flags = CIFS_NO_RSP_BUF;
	unsigned int total_len;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;

	rc = cifs_send_recv(xid, tcon->ses, server,
			    &rqst, &resp_buf_type, flags,
			    &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
				    tcon->ses->Suid, rc);
	}

	return rc;
}

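/* Wrapper around smb2_lockv() for a single byte-range lock or unlock */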
int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

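/*
 * Acknowledge a lease break: inform the server of the lease state the client
 * now holds for the lease identified by @lease_key.
 */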
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_lease_ack *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	__u64 *please_key_high;
	__u64 *please_key_low;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	total_len += 12;

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	please_key_low = (__u64 *)lease_key;
	please_key_high = (__u64 *)(lease_key+8);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
			ses->Suid, *please_key_low, *please_key_high, rc);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	} else
		trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
			ses->Suid, *please_key_low, *please_key_high);

	return rc;
}