// SPDX-License-Identifier: LGPL-2.1
/*
 *   fs/cifs/smb2pdu.c
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2013
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 */

 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
 /* Note that there are handle based routines which must be                   */
 /* treated slightly differently for reconnection purposes since we never     */
 /* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

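/*
 * Returns 1 when SMB3 transport encryption must be used on this tcon: either
 * the session or the share was flagged for encrypted data by the server, or
 * the mount requested "seal" and the server advertises the encryption
 * capability.
 */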
int smb3_encryption_required(const struct cifs_tcon *tcon)
{
	if (!tcon || !tcon->ses)
		return 0;
	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
		return 1;
	if (tcon->seal &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	return 0;
}

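/*
 * Fill in the fixed 64 byte SMB2 sync header for @smb2_cmd: protocol id,
 * credit request (up to 10 when the server's credit limit allows, 2 if no
 * server is passed), process id, tree id, session id, and the signing flag
 * when the connection signs and the tcon is not encrypted.
 */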
static void
smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
		  const struct cifs_tcon *tcon,
		  struct TCP_Server_Info *server)
{
	shdr->ProtocolId = SMB2_PROTO_NUMBER;
	shdr->StructureSize = cpu_to_le16(64);
	shdr->Command = smb2_cmd;
	if (server) {
		spin_lock(&server->req_lock);
		/* Request up to 10 credits but don't go over the limit. */
		if (server->credits >= server->max_credits)
			shdr->CreditRequest = cpu_to_le16(0);
		else
			shdr->CreditRequest = cpu_to_le16(
				min_t(int, server->max_credits -
						server->credits, 10));
		spin_unlock(&server->req_lock);
	} else {
		shdr->CreditRequest = cpu_to_le16(2);
	}
	shdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		shdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	shdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		shdr->SessionId = tcon->ses->Suid;

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer not to set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (server && server->sign && !smb3_encryption_required(tcon))
		shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	return;
}

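/*
 * Called before sending most SMB2 commands: waits (up to 10 seconds per
 * reconnect target) for the transport to come back, then redrives protocol
 * negotiation, session setup and tree connect as needed.  Handle based
 * commands return -EAGAIN so the caller can reopen the file before retrying.
 */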
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server)
{
	int rc;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	int retries;

	/*
	 * SMB2 NegProt, SessSetup and Logoff do not have a tcon yet, so the
	 * check for tcp and smb session status is done differently for those
	 * three - in the calling routine.
	 */
	if (tcon == NULL)
		return 0;

	if (smb2_command == SMB2_TREE_CONNECT)
		return 0;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server) || !server)
		return -EIO;

	ses = tcon->ses;
	retries = server->nr_targets;

	/*
	 * Give the demultiplex thread up to 10 seconds for each target
	 * available for reconnect -- this should be greater than the cifs
	 * socket timeout, which is 7 seconds.
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      10 * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		if (retries && --retries)
			continue;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
		retries = server->nr_targets;
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return 0;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);

	/*
	 * Recheck after acquiring the mutex. If another thread is negotiating
	 * and the server never sends an answer the socket will be closed
	 * and tcpStatus set to reconnect.
	 */
	if (server->tcpStatus == CifsNeedReconnect) {
		rc = -EHOSTDOWN;
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	/*
	 * If we are reconnecting an extra channel, bind
	 */
	if (server->is_channel) {
		ses->binding = true;
		ses->binding_chan = cifs_ses_find_chan(ses, server);
	}

	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect) {
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);
		if ((rc == -EACCES) && !tcon->retry) {
			rc = -EHOSTDOWN;
			ses->binding = false;
			ses->binding_chan = NULL;
			mutex_unlock(&tcon->ses->session_mutex);
			goto failed;
		}
	}
	/*
	 * End of channel binding
	 */
	ses->binding = false;
	ses->binding_chan = NULL;

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	if (tcon->use_persistent)
		tcon->need_reopen_files = true;

	rc = cifs_tree_connect(0, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);

	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc) {
		/* If sess reconnected but tcon didn't, something strange ... */
		pr_warn_once("reconnect tcon failed rc = %d\n", rc);
		goto out;
	}

	if (smb2_command != SMB2_INTERNAL_CMD)
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);

	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		rc = -EAGAIN;
	}
failed:
	unload_nls(nls_codepage);
	return rc;
}

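/*
 * Zero the request buffer, build the sync header via smb2_hdr_assemble() and
 * set StructureSize2 from the per-command smb2_req_struct_sizes table.  On
 * return *total_len holds the length of the fixed area (header + structure).
 */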
static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server,
	       void *buf,
	       unsigned int *total_len)
{
	struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(buf, 0, 256);

	smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon, server);
	spdu->StructureSize2 = cpu_to_le16(parmsize);

	*total_len = parmsize + sizeof(struct smb2_sync_hdr);
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
				 struct TCP_Server_Info *server,
				 void **request_buf, unsigned int *total_len)
{
	/* BB eventually switch this to SMB2 specific small buf size */
	if (smb2_command == SMB2_SET_INFO)
		*request_buf = cifs_buf_get();
	else
		*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	fill_small_buf(smb2_command, tcon, server,
		       (struct smb2_sync_hdr *)(*request_buf),
		       total_len);

	if (tcon != NULL) {
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return 0;
}

static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	int rc;

	rc = smb2_reconnect(smb2_command, tcon, server);
	if (rc)
		return rc;

	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
				     total_len);
}

static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
					     request_buf, total_len);
	}
	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
				   request_buf, total_len);
}

/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */

static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
	pneg_ctxt->DataLength =
		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
			  - sizeof(struct smb2_neg_context));
	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}

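/*
 * Advertise the ciphers we are willing to use, most preferred first: only
 * AES-256-GCM when require_gcm_256 is set, the GCM/CCM triple when
 * enable_gcm_256 is set, otherwise the two AES-128 ciphers.  DataLength is
 * 2 (CipherCount) plus 2 bytes per cipher.
 */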
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	if (require_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
		pneg_ctxt->CipherCount = cpu_to_le16(1);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
	} else if (enable_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(3);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
	} else {
		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(2);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
	}
}

static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
{
	struct nls_table *cp = load_nls_default();

	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;

	/* copy up to max of first 100 bytes of server name to NetName field */
	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
	/* context size is DataLength + minimal smb2_neg_context */
	return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) +
			sizeof(struct smb2_neg_context), 8) * 8;
}

static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	pneg_ctxt->Name[0] = 0x93;
	pneg_ctxt->Name[1] = 0xAD;
	pneg_ctxt->Name[2] = 0x25;
	pneg_ctxt->Name[3] = 0x50;
	pneg_ctxt->Name[4] = 0x9C;
	pneg_ctxt->Name[5] = 0xB4;
	pneg_ctxt->Name[6] = 0x11;
	pneg_ctxt->Name[7] = 0xE7;
	pneg_ctxt->Name[8] = 0xB4;
	pneg_ctxt->Name[9] = 0x23;
	pneg_ctxt->Name[10] = 0x83;
	pneg_ctxt->Name[11] = 0xDE;
	pneg_ctxt->Name[12] = 0x96;
	pneg_ctxt->Name[13] = 0x8B;
	pneg_ctxt->Name[14] = 0xCD;
	pneg_ctxt->Name[15] = 0x7C;
}

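/*
 * Append the SMB3.1.1 negotiate contexts (preauth integrity, encryption,
 * optionally compression, then netname and POSIX extensions) after the fixed
 * portion of the negotiate request, keeping each on an 8 byte boundary, and
 * update NegotiateContextOffset/Count and *total_len accordingly.
 */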
static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
		      struct TCP_Server_Info *server, unsigned int *total_len)
{
	char *pneg_ctxt;
	unsigned int ctxt_len;

	if (*total_len > 200) {
		/* In case length corrupted don't want to overrun smb buffer */
		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
		return;
	}

	/*
	 * round up total_len of fixed part of SMB3 negotiate request to 8
	 * byte boundary before adding negotiate contexts
	 */
	*total_len = roundup(*total_len, 8);

	pneg_ctxt = (*total_len) + (char *)req;
	req->NegotiateContextOffset = cpu_to_le32(*total_len);

	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	if (server->compress_algorithm) {
		build_compression_ctxt((struct smb2_compression_capabilities_context *)
				pneg_ctxt);
		ctxt_len = DIV_ROUND_UP(
			sizeof(struct smb2_compression_capabilities_context),
			8) * 8;
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		req->NegotiateContextCount = cpu_to_le16(5);
	} else
		req->NegotiateContextCount = cpu_to_le16(4);

	ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
					server->hostname);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
	*total_len += sizeof(struct smb2_posix_neg_context);
}

static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/* If invalid preauth context warn but use what we requested, SHA-512 */
	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad preauth context\n");
		return;
	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
		pr_warn_once("server sent invalid SaltLength\n");
		return;
	}
	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
		pr_warn_once("Invalid SMB3 hash algorithm count\n");
	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
		pr_warn_once("unknown SMB3 hash algorithm\n");
}

static void decode_compress_ctx(struct TCP_Server_Info *server,
			 struct smb2_compression_capabilities_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/* sizeof compress context is a one element compression capability struct */
	if (len < 10) {
		pr_warn_once("server sent bad compression cntxt\n");
		return;
	}
	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
		pr_warn_once("Invalid SMB3 compress algorithm count\n");
		return;
	}
	if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
		pr_warn_once("unknown compression algorithm\n");
		return;
	}
	server->compress_algorithm = ctxt->CompressionAlgorithms[0];
}

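/*
 * Validate the encryption context returned by the server: exactly one cipher
 * must be present and it must be one we offered.  A cipher value of 0 means
 * the server supports none of the requested ciphers, so encryption support is
 * cleared rather than failing the negotiation.
 */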
static int decode_encrypt_ctx(struct TCP_Server_Info *server,
			      struct smb2_encryption_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad crypto ctxt len\n");
		return -EINVAL;
	}

	if (le16_to_cpu(ctxt->CipherCount) != 1) {
		pr_warn_once("Invalid SMB3.11 cipher count\n");
		return -EINVAL;
	}
	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
	if (require_gcm_256) {
		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
			return -EOPNOTSUPP;
		}
	} else if (ctxt->Ciphers[0] == 0) {
		/*
		 * e.g. if server only supported AES256_CCM (very unlikely)
		 * or server supported no encryption types or had all disabled.
		 * Since GLOBAL_CAP_ENCRYPTION will be not set, in the case
		 * in which mount requested encryption ("seal") checks later
		 * on during tree connection will return proper rc, but if
		 * seal not requested by client, since server is allowed to
		 * return 0 to indicate no supported cipher, we can't fail here
		 */
		server->cipher_type = 0;
		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
		pr_warn_once("Server does not support requested encryption types\n");
		return 0;
	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
		/* server returned a cipher we didn't ask for */
		pr_warn_once("Invalid SMB3.11 cipher returned\n");
		return -EINVAL;
	}
	server->cipher_type = ctxt->Ciphers[0];
	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
	return 0;
}

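/*
 * Walk the negotiate contexts in the SMB3.1.1 negotiate response, bounds
 * checking each against the length of the SMB, and dispatch to the per-type
 * decoders above.  Context data is padded to 8 byte boundaries on the wire.
 */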
static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
				     struct TCP_Server_Info *server,
				     unsigned int len_of_smb)
{
	struct smb2_neg_context *pctx;
	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
	unsigned int len_of_ctxts, i;
	int rc = 0;

	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
	if (len_of_smb <= offset) {
		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
		return -EINVAL;
	}

	len_of_ctxts = len_of_smb - offset;

	for (i = 0; i < ctxt_cnt; i++) {
		int clen;
		/* check that offset is not beyond end of SMB */
		if (len_of_ctxts == 0)
			break;

		if (len_of_ctxts < sizeof(struct smb2_neg_context))
			break;

		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
		clen = le16_to_cpu(pctx->DataLength);
		if (clen > len_of_ctxts)
			break;

		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
			decode_preauth_context(
				(struct smb2_preauth_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
			rc = decode_encrypt_ctx(server,
				(struct smb2_encryption_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
			decode_compress_ctx(server,
				(struct smb2_compression_capabilities_context *)pctx);
		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
			server->posix_ext_supported = true;
		else
			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
				le16_to_cpu(pctx->ContextType));

		if (rc)
			break;
		/* offsets must be 8 byte aligned */
		clen = (clen + 7) & ~0x7;
		offset += clen + sizeof(struct smb2_neg_context);
		len_of_ctxts -= clen;
	}
	return rc;
}

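/*
 * Allocate a POSIX create context (SMB2_CREATE_TAG_POSIX) carrying the
 * requested mode bits; the 16 byte Name below is the tag GUID in wire order.
 * Returns NULL on allocation failure.
 */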
static struct create_posix *
create_posix_buf(umode_t mode)
{
	struct create_posix *buf;

	buf = kzalloc(sizeof(struct create_posix),
		      GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_posix, Mode));
	buf->ccontext.DataLength = cpu_to_le32(4);
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_posix, Name));
	buf->ccontext.NameLength = cpu_to_le16(16);

	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	buf->Name[0] = 0x93;
	buf->Name[1] = 0xAD;
	buf->Name[2] = 0x25;
	buf->Name[3] = 0x50;
	buf->Name[4] = 0x9C;
	buf->Name[5] = 0xB4;
	buf->Name[6] = 0x11;
	buf->Name[7] = 0xE7;
	buf->Name[8] = 0xB4;
	buf->Name[9] = 0x23;
	buf->Name[10] = 0x83;
	buf->Name[11] = 0xDE;
	buf->Name[12] = 0x96;
	buf->Name[13] = 0x8B;
	buf->Name[14] = 0xCD;
	buf->Name[15] = 0x7C;
	buf->Mode = cpu_to_le32(mode);
	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
	return buf;
}

static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_posix_buf(mode);
	if (mode == ACL_NO_MODE)
		cifs_dbg(FYI, "Invalid mode\n");
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_posix);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset = cpu_to_le32(
			sizeof(struct smb2_create_req) +
			iov[num - 1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
	*num_iovec = num + 1;
	return 0;
}


/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

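/*
 * Send the SMB2 NEGOTIATE request.  Depending on the vers= mount option this
 * offers a single dialect or the SMB3ANY/default multi-dialect lists, then
 * records the dialect, capabilities, buffer sizes and security blob from the
 * response and decodes any SMB3.1.1 negotiate contexts.
 */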
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb_rqst rqst;
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server = cifs_ses_server(ses);
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;
	unsigned int total_len;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->sync_hdr.SessionId = 0;

	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(3);
		total_len += 6;
	} else if (strcmp(server->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(4);
		total_len += 8;
	} else {
		/* otherwise send specific dialect */
		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
		req->DialectCount = cpu_to_le16(1);
		total_len += 2;
	}

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
	if (ses->chan_max > 1)
		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else {
		memcpy(req->ClientGUID, server->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(server->vals->version_string,
		     SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(server->vals->version_string,
		     SMBDEFAULT_VERSION_STRING) == 0))
			assemble_neg_contexts(req, server, &total_len);
	}
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);
	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc == -EOPNOTSUPP) {
		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
		goto neg_exit;
	} else if (rc != 0)
		goto neg_exit;

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2.1 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			/* ops set to 3.0 by default, so update */
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (strcmp(server->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			/* ops set to 3.0 by default, so update */
			server->ops = &smb21_operations;
			server->vals = &smb21_values;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (le16_to_cpu(rsp->DialectRevision) !=
				server->vals->protocol_id) {
		/* if requested single dialect ensure returned dialect matched */
		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
				le16_to_cpu(rsp->DialectRevision));
		return -EIO;
	}

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
	else {
		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
				le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/*
	 * Keep a copy of the hash after negprot. This hash will be
	 * the starting hash value for all sessions made from this
	 * server.
	 */
	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
	       SMB2_PREAUTH_HASH_SIZE);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
				server->sec_mode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	/*
	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg
	 * context, so set the cipher type manually.
	 */
	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       (struct smb2_sync_hdr *)rsp);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0) {
		cifs_dbg(FYI, "missing security blob on negprot\n");
		server->sec_ntlmssp = true;
	}

	rc = cifs_enable_signing(server, ses->sign);
	if (rc)
		goto neg_exit;
	if (blob_length) {
		rc = decode_negTokenInit(security_blob, blob_length, server);
		if (rc == 1)
			rc = 0;
		else if (rc == 0)
			rc = -EIO;
	}

	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
		if (rsp->NegotiateContextCount)
			rc = smb311_decode_neg_context(rsp, server,
						       rsp_iov.iov_len);
		else
			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
	}
neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

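/*
 * Guard against downgrade attacks by re-sending the negotiate parameters over
 * a signed FSCTL_VALIDATE_NEGOTIATE_INFO ioctl and checking that the dialect,
 * security mode and capabilities in the response match what was negotiated.
 * Skipped for SMB3.1.1, where preauth integrity supersedes it.
 */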
int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	struct validate_negotiate_info_req *pneg_inbuf;
	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
	u32 rsplen;
	u32 inbuflen; /* max of 4 dialects */
	struct TCP_Server_Info *server = tcon->ses->server;

	cifs_dbg(FYI, "validate negotiate\n");

	/* In SMB3.11 preauth integrity supersedes validate negotiate */
	if (server->dialect == SMB311_PROT_ID)
		return 0;

	/*
	 * validation ioctl must be signed, so no point sending this if we
	 * cannot sign it (ie we are not a known user). Even if signing is not
	 * required (enabled but not negotiated), in those cases we selectively
	 * sign just this, the first and only signed request on a connection.
	 * Having validation of negotiate info helps reduce attack vectors.
	 */
	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
		return 0; /* validation requires signing */

	if (tcon->ses->user_name == NULL) {
		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
		return 0; /* validation requires signing */
	}
1022
1023 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001024 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
Steve French0603c962017-09-20 19:57:18 -05001025
Long Li2796d302018-04-25 11:30:04 -07001026 pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1027 if (!pneg_inbuf)
1028 return -ENOMEM;
1029
1030 pneg_inbuf->Capabilities =
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001031 cpu_to_le32(server->vals->req_capabilities);
Steve French679971e2021-05-07 18:24:11 -05001032 if (tcon->ses->chan_max > 1)
1033 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1034
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001035 memcpy(pneg_inbuf->Guid, server->client_guid,
Sachin Prabhu39552ea2014-05-13 00:48:12 +01001036 SMB2_CLIENT_GUID_SIZE);
Steve Frenchff1c0382013-11-19 23:44:46 -06001037
1038 if (tcon->ses->sign)
Long Li2796d302018-04-25 11:30:04 -07001039 pneg_inbuf->SecurityMode =
Steve Frenchff1c0382013-11-19 23:44:46 -06001040 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1041 else if (global_secflags & CIFSSEC_MAY_SIGN)
Long Li2796d302018-04-25 11:30:04 -07001042 pneg_inbuf->SecurityMode =
Steve Frenchff1c0382013-11-19 23:44:46 -06001043 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1044 else
Long Li2796d302018-04-25 11:30:04 -07001045 pneg_inbuf->SecurityMode = 0;
Steve Frenchff1c0382013-11-19 23:44:46 -06001046
Steve French9764c022017-09-17 10:41:35 -05001047
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001048 if (strcmp(server->vals->version_string,
Steve French9764c022017-09-17 10:41:35 -05001049 SMB3ANY_VERSION_STRING) == 0) {
Long Li2796d302018-04-25 11:30:04 -07001050 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1051 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
Steve French6dffa4c2021-02-02 00:03:58 -06001052 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1053 pneg_inbuf->DialectCount = cpu_to_le16(3);
1054 /* SMB 2.1 not included so subtract one dialect from len */
Long Li2796d302018-04-25 11:30:04 -07001055 inbuflen = sizeof(*pneg_inbuf) -
Steve French6dffa4c2021-02-02 00:03:58 -06001056 (sizeof(pneg_inbuf->Dialects[0]));
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001057 } else if (strcmp(server->vals->version_string,
Steve French9764c022017-09-17 10:41:35 -05001058 SMBDEFAULT_VERSION_STRING) == 0) {
Long Li2796d302018-04-25 11:30:04 -07001059 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1060 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1061 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
Steve Frenchd5c70762019-01-03 02:37:21 -06001062 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1063 pneg_inbuf->DialectCount = cpu_to_le16(4);
Steve French6dffa4c2021-02-02 00:03:58 -06001064 /* structure is big enough for 4 dialects */
Long Li2796d302018-04-25 11:30:04 -07001065 inbuflen = sizeof(*pneg_inbuf);
Steve French9764c022017-09-17 10:41:35 -05001066 } else {
1067 /* otherwise specific dialect was requested */
Long Li2796d302018-04-25 11:30:04 -07001068 pneg_inbuf->Dialects[0] =
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001069 cpu_to_le16(server->vals->protocol_id);
Long Li2796d302018-04-25 11:30:04 -07001070 pneg_inbuf->DialectCount = cpu_to_le16(1);
Steve French9764c022017-09-17 10:41:35 -05001071 /* structure is big enough for 3 dialects, sending only 1 */
Long Li2796d302018-04-25 11:30:04 -07001072 inbuflen = sizeof(*pneg_inbuf) -
1073 sizeof(pneg_inbuf->Dialects[0]) * 2;
Steve French9764c022017-09-17 10:41:35 -05001074 }
Steve Frenchff1c0382013-11-19 23:44:46 -06001075
1076 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1077 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001078 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1079 (char **)&pneg_rsp, &rsplen);
Namjae Jeon969ae8e2019-01-22 09:46:45 +09001080 if (rc == -EOPNOTSUPP) {
1081 /*
1082 * Old Windows versions or Netapp SMB server can return
1083 * not supported error. Client should accept it.
1084 */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001085 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
Colin Ian King21078202019-05-17 09:12:33 +01001086 rc = 0;
1087 goto out_free_inbuf;
Namjae Jeon969ae8e2019-01-22 09:46:45 +09001088 } else if (rc != 0) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001089 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1090 rc);
Long Li2796d302018-04-25 11:30:04 -07001091 rc = -EIO;
1092 goto out_free_inbuf;
Steve Frenchff1c0382013-11-19 23:44:46 -06001093 }
1094
Long Li2796d302018-04-25 11:30:04 -07001095 rc = -EIO;
1096 if (rsplen != sizeof(*pneg_rsp)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001097 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1098 rsplen);
Steve French7db0a6e2017-05-03 21:12:20 -05001099
1100 /* relax check since Mac returns max bufsize allowed on ioctl */
Long Li2796d302018-04-25 11:30:04 -07001101 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
1102 goto out_free_rsp;
Steve Frenchff1c0382013-11-19 23:44:46 -06001103 }
1104
1105 /* check validate negotiate info response matches what we got earlier */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001106 if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
Steve Frenchff1c0382013-11-19 23:44:46 -06001107 goto vneg_out;
1108
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001109 if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
Steve Frenchff1c0382013-11-19 23:44:46 -06001110 goto vneg_out;
1111
1112	/* do not validate the server guid because it is not saved at negprot time yet */
1113
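	/*
	 * SMB2_NT_FIND and SMB2_LARGE_FILES are ORed into server->capabilities
	 * locally when the negotiate response is parsed, so add them to the
	 * reply capabilities here before comparing.
	 */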
1114 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001115 SMB2_LARGE_FILES) != server->capabilities)
Steve Frenchff1c0382013-11-19 23:44:46 -06001116 goto vneg_out;
1117
1118 /* validate negotiate successful */
Long Li2796d302018-04-25 11:30:04 -07001119 rc = 0;
Steve Frenchff1c0382013-11-19 23:44:46 -06001120 cifs_dbg(FYI, "validate negotiate info successful\n");
Long Li2796d302018-04-25 11:30:04 -07001121 goto out_free_rsp;
Steve Frenchff1c0382013-11-19 23:44:46 -06001122
1123vneg_out:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001124 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
Long Li2796d302018-04-25 11:30:04 -07001125out_free_rsp:
David Disseldorpfe83bebc2017-10-20 14:49:37 +02001126 kfree(pneg_rsp);
Long Li2796d302018-04-25 11:30:04 -07001127out_free_inbuf:
1128 kfree(pneg_inbuf);
1129 return rc;
Steve Frenchff1c0382013-11-19 23:44:46 -06001130}
1131
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301132enum securityEnum
1133smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1134{
1135 switch (requested) {
1136 case Kerberos:
1137 case RawNTLMSSP:
1138 return requested;
1139 case NTLMv2:
1140 return RawNTLMSSP;
1141 case Unspecified:
1142 if (server->sec_ntlmssp &&
1143 (global_secflags & CIFSSEC_MAY_NTLMSSP))
1144 return RawNTLMSSP;
1145 if ((server->sec_kerberos || server->sec_mskerberos) &&
1146 (global_secflags & CIFSSEC_MAY_KRB5))
1147 return Kerberos;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001148 fallthrough;
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301149 default:
1150 return Unspecified;
1151 }
1152}
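/*
 * Summary of the mapping above (sketch):
 *   Kerberos or RawNTLMSSP requested                     -> used as-is
 *   NTLMv2 requested                                     -> RawNTLMSSP
 *   Unspecified + server NTLMSSP + CIFSSEC_MAY_NTLMSSP   -> RawNTLMSSP
 *   Unspecified + server krb5/mskrb5 + CIFSSEC_MAY_KRB5  -> Kerberos
 *   anything else                                        -> Unspecified (caller treats as error)
 */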
1153
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001154struct SMB2_sess_data {
1155 unsigned int xid;
1156 struct cifs_ses *ses;
1157 struct nls_table *nls_cp;
1158 void (*func)(struct SMB2_sess_data *);
1159 int result;
1160 u64 previous_session;
1161
1162 /* we will send the SMB in three pieces:
1163 * a fixed length beginning part, an optional
1164 * SPNEGO blob (which can be zero length), and a
1165 * last part which will include the strings
1166 * and rest of bcc area. This allows us to avoid
1167	 * a large 17K buffer allocation
1168 */
1169 int buf0_type;
1170 struct kvec iov[2];
1171};
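/*
 * sess_data->func points at the next leg of the authentication exchange;
 * SMB2_sess_setup() keeps invoking it until a handler clears it (on
 * completion or on error), with the outcome left in sess_data->result.
 */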
1172
1173static int
1174SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1175{
1176 int rc;
1177 struct cifs_ses *ses = sess_data->ses;
1178 struct smb2_sess_setup_req *req;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001179 struct TCP_Server_Info *server = cifs_ses_server(ses);
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001180 unsigned int total_len;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001181
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001182 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1183 (void **) &req,
1184 &total_len);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001185 if (rc)
1186 return rc;
1187
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001188 if (sess_data->ses->binding) {
1189 req->sync_hdr.SessionId = sess_data->ses->Suid;
1190 req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
1191 req->PreviousSessionId = 0;
1192 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1193 } else {
1194 /* First session, not a reauthenticate */
1195 req->sync_hdr.SessionId = 0;
1196 /*
1197 * if reconnect, we need to send previous sess id
1198 * otherwise it is 0
1199 */
1200 req->PreviousSessionId = sess_data->previous_session;
1201 req->Flags = 0; /* MBZ */
1202 }
Steve Frenchd4090142018-06-13 17:05:58 -05001203
1204	/* enough to enable echoes and oplocks and one max size write */
1205 req->sync_hdr.CreditRequest = cpu_to_le16(130);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001206
1207 /* only one of SMB2 signing flags may be set in SMB2 request */
1208 if (server->sign)
1209 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1210 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1211 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1212 else
1213 req->SecurityMode = 0;
1214
Steve French8d330962019-07-25 18:13:10 -05001215#ifdef CONFIG_CIFS_DFS_UPCALL
1216 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1217#else
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001218 req->Capabilities = 0;
Steve French8d330962019-07-25 18:13:10 -05001219#endif /* DFS_UPCALL */
1220
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001221 req->Channel = 0; /* MBZ */
1222
1223 sess_data->iov[0].iov_base = (char *)req;
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001224 /* 1 for pad */
1225 sess_data->iov[0].iov_len = total_len - 1;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001226 /*
1227 * This variable will be used to clear the buffer
1228 * allocated above in case of any error in the calling function.
1229 */
1230 sess_data->buf0_type = CIFS_SMALL_BUFFER;
1231
1232 return 0;
1233}
1234
1235static void
1236SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1237{
1238 free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
1239 sess_data->buf0_type = CIFS_NO_BUFFER;
1240}
1241
1242static int
1243SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1244{
1245 int rc;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001246 struct smb_rqst rqst;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001247 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001248 struct kvec rsp_iov = { NULL, 0 };
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001249
1250 /* Testing shows that buffer offset must be at location of Buffer[0] */
1251 req->SecurityBufferOffset =
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001252 cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001253 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1254
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001255 memset(&rqst, 0, sizeof(struct smb_rqst));
1256 rqst.rq_iov = sess_data->iov;
1257 rqst.rq_nvec = 2;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001258
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001259 /* BB add code to build os and lm fields */
1260 rc = cifs_send_recv(sess_data->xid, sess_data->ses,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001261 cifs_ses_server(sess_data->ses),
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001262 &rqst,
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001263 &sess_data->buf0_type,
Shyam Prasad N0f56db82021-02-03 22:49:52 -08001264 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001265 cifs_small_buf_release(sess_data->iov[0].iov_base);
1266 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001267
1268 return rc;
1269}
1270
1271static int
1272SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1273{
1274 int rc = 0;
1275 struct cifs_ses *ses = sess_data->ses;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001276 struct TCP_Server_Info *server = cifs_ses_server(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001277
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001278 mutex_lock(&server->srv_mutex);
1279 if (server->ops->generate_signingkey) {
1280 rc = server->ops->generate_signingkey(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001281 if (rc) {
1282 cifs_dbg(FYI,
1283 "SMB3 session key generation failed\n");
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001284 mutex_unlock(&server->srv_mutex);
Pavel Shilovskycabfb362016-11-07 18:20:50 -08001285 return rc;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001286 }
1287 }
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001288 if (!server->session_estab) {
1289 server->sequence_number = 0x2;
1290 server->session_estab = true;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001291 }
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001292 mutex_unlock(&server->srv_mutex);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001293
1294 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001295 /* keep existing ses state if binding */
1296 if (!ses->binding) {
1297 spin_lock(&GlobalMid_Lock);
1298 ses->status = CifsGood;
1299 ses->need_reconnect = false;
1300 spin_unlock(&GlobalMid_Lock);
1301 }
1302
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001303 return rc;
1304}
1305
1306#ifdef CONFIG_CIFS_UPCALL
1307static void
1308SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1309{
1310 int rc;
1311 struct cifs_ses *ses = sess_data->ses;
1312 struct cifs_spnego_msg *msg;
1313 struct key *spnego_key = NULL;
1314 struct smb2_sess_setup_rsp *rsp = NULL;
1315
1316 rc = SMB2_sess_alloc_buffer(sess_data);
1317 if (rc)
1318 goto out;
1319
1320 spnego_key = cifs_get_spnego_key(ses);
1321 if (IS_ERR(spnego_key)) {
1322 rc = PTR_ERR(spnego_key);
Steve French0a018942020-07-16 00:34:21 -05001323 if (rc == -ENOKEY)
1324 cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001325 spnego_key = NULL;
1326 goto out;
1327 }
1328
1329 msg = spnego_key->payload.data[0];
1330 /*
1331 * check version field to make sure that cifs.upcall is
1332 * sending us a response in an expected form
1333 */
1334 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001335 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1336 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001337 rc = -EKEYREJECTED;
1338 goto out_put_spnego_key;
1339 }
1340
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001341 /* keep session key if binding */
1342 if (!ses->binding) {
1343 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1344 GFP_KERNEL);
1345 if (!ses->auth_key.response) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001346 cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001347 msg->sesskey_len);
1348 rc = -ENOMEM;
1349 goto out_put_spnego_key;
1350 }
1351 ses->auth_key.len = msg->sesskey_len;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001352 }
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001353
1354 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1355 sess_data->iov[1].iov_len = msg->secblob_len;
1356
1357 rc = SMB2_sess_sendreceive(sess_data);
1358 if (rc)
1359 goto out_put_spnego_key;
1360
1361 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001362 /* keep session id and flags if binding */
1363 if (!ses->binding) {
1364 ses->Suid = rsp->sync_hdr.SessionId;
1365 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1366 }
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001367
1368 rc = SMB2_sess_establish_session(sess_data);
1369out_put_spnego_key:
1370 key_invalidate(spnego_key);
1371 key_put(spnego_key);
1372out:
1373 sess_data->result = rc;
1374 sess_data->func = NULL;
1375 SMB2_sess_free_buffer(sess_data);
1376}
1377#else
1378static void
1379SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1380{
1381 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1382 sess_data->result = -EOPNOTSUPP;
1383 sess_data->func = NULL;
1384}
1385#endif
1386
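/*
 * Raw NTLMSSP is a two-leg exchange: the negotiate leg below normally gets
 * STATUS_MORE_PROCESSING_REQUIRED plus a server challenge back, and the
 * authenticate leg then completes the session setup.
 */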
Sachin Prabhu166cea42016-10-07 19:11:22 +01001387static void
1388SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
1389
1390static void
1391SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1392{
1393 int rc;
1394 struct cifs_ses *ses = sess_data->ses;
1395 struct smb2_sess_setup_rsp *rsp = NULL;
1396 char *ntlmssp_blob = NULL;
1397 bool use_spnego = false; /* else use raw ntlmssp */
1398 u16 blob_length = 0;
1399
1400 /*
1401	 * If memory allocation is successful, the caller of this
1402	 * function frees it.
1403 */
1404 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1405 if (!ses->ntlmssp) {
1406 rc = -ENOMEM;
1407 goto out_err;
1408 }
1409 ses->ntlmssp->sesskey_per_smbsess = true;
1410
1411 rc = SMB2_sess_alloc_buffer(sess_data);
1412 if (rc)
1413 goto out_err;
1414
1415 ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
1416 GFP_KERNEL);
1417 if (ntlmssp_blob == NULL) {
1418 rc = -ENOMEM;
1419 goto out;
1420 }
1421
1422 build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
1423 if (use_spnego) {
1424 /* BB eventually need to add this */
1425 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1426 rc = -EOPNOTSUPP;
1427 goto out;
1428 } else {
1429 blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
1430 /* with raw NTLMSSP we don't encapsulate in SPNEGO */
1431 }
1432 sess_data->iov[1].iov_base = ntlmssp_blob;
1433 sess_data->iov[1].iov_len = blob_length;
1434
1435 rc = SMB2_sess_sendreceive(sess_data);
1436 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1437
1438 /* If true, rc here is expected and not an error */
1439	/* If the check below passes, rc here is expected and not an error */
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001440 rsp->sync_hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
Sachin Prabhu166cea42016-10-07 19:11:22 +01001441 rc = 0;
1442
1443 if (rc)
1444 goto out;
1445
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10001446 if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
Sachin Prabhu166cea42016-10-07 19:11:22 +01001447 le16_to_cpu(rsp->SecurityBufferOffset)) {
1448 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
1449 le16_to_cpu(rsp->SecurityBufferOffset));
1450 rc = -EIO;
1451 goto out;
1452 }
1453 rc = decode_ntlmssp_challenge(rsp->Buffer,
1454 le16_to_cpu(rsp->SecurityBufferLength), ses);
1455 if (rc)
1456 goto out;
1457
1458 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1459
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001460 /* keep existing ses id and flags if binding */
1461 if (!ses->binding) {
1462 ses->Suid = rsp->sync_hdr.SessionId;
1463 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1464 }
Sachin Prabhu166cea42016-10-07 19:11:22 +01001465
1466out:
1467 kfree(ntlmssp_blob);
1468 SMB2_sess_free_buffer(sess_data);
1469 if (!rc) {
1470 sess_data->result = 0;
1471 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1472 return;
1473 }
1474out_err:
1475 kfree(ses->ntlmssp);
1476 ses->ntlmssp = NULL;
1477 sess_data->result = rc;
1478 sess_data->func = NULL;
1479}
1480
1481static void
1482SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1483{
1484 int rc;
1485 struct cifs_ses *ses = sess_data->ses;
1486 struct smb2_sess_setup_req *req;
1487 struct smb2_sess_setup_rsp *rsp = NULL;
1488 unsigned char *ntlmssp_blob = NULL;
1489 bool use_spnego = false; /* else use raw ntlmssp */
1490 u16 blob_length = 0;
1491
1492 rc = SMB2_sess_alloc_buffer(sess_data);
1493 if (rc)
1494 goto out;
1495
1496 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001497 req->sync_hdr.SessionId = ses->Suid;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001498
1499 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
1500 sess_data->nls_cp);
1501 if (rc) {
1502 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1503 goto out;
1504 }
1505
1506 if (use_spnego) {
1507 /* BB eventually need to add this */
1508 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1509 rc = -EOPNOTSUPP;
1510 goto out;
1511 }
1512 sess_data->iov[1].iov_base = ntlmssp_blob;
1513 sess_data->iov[1].iov_len = blob_length;
1514
1515 rc = SMB2_sess_sendreceive(sess_data);
1516 if (rc)
1517 goto out;
1518
1519 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1520
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001521 /* keep existing ses id and flags if binding */
1522 if (!ses->binding) {
1523 ses->Suid = rsp->sync_hdr.SessionId;
1524 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1525 }
Sachin Prabhu166cea42016-10-07 19:11:22 +01001526
1527 rc = SMB2_sess_establish_session(sess_data);
Ronnie Sahlbergf560cda2020-04-12 16:09:26 +10001528#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1529 if (ses->server->dialect < SMB30_PROT_ID) {
1530 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1531 /*
1532 * The session id is opaque in terms of endianness, so we can't
1533	 * print it as a long long; we dump it as we got it on the wire
1534 */
1535 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
1536 &ses->Suid);
1537 cifs_dbg(VFS, "Session Key %*ph\n",
1538 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1539 cifs_dbg(VFS, "Signing Key %*ph\n",
1540 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1541 }
1542#endif
Sachin Prabhu166cea42016-10-07 19:11:22 +01001543out:
1544 kfree(ntlmssp_blob);
1545 SMB2_sess_free_buffer(sess_data);
1546 kfree(ses->ntlmssp);
1547 ses->ntlmssp = NULL;
1548 sess_data->result = rc;
1549 sess_data->func = NULL;
1550}
1551
1552static int
1553SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
1554{
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301555 int type;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001556
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001557 type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301558 cifs_dbg(FYI, "sess setup type %d\n", type);
1559 if (type == Unspecified) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001560 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301561 return -EINVAL;
1562 }
1563
1564 switch (type) {
Sachin Prabhu166cea42016-10-07 19:11:22 +01001565 case Kerberos:
1566 sess_data->func = SMB2_auth_kerberos;
1567 break;
1568 case RawNTLMSSP:
1569 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1570 break;
1571 default:
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301572 cifs_dbg(VFS, "secType %d not supported!\n", type);
Sachin Prabhu166cea42016-10-07 19:11:22 +01001573 return -EOPNOTSUPP;
1574 }
1575
1576 return 0;
1577}
1578
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001579int
1580SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1581 const struct nls_table *nls_cp)
1582{
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001583 int rc = 0;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001584 struct TCP_Server_Info *server = cifs_ses_server(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001585 struct SMB2_sess_data *sess_data;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001586
Joe Perchesf96637b2013-05-04 22:12:25 -05001587 cifs_dbg(FYI, "Session Setup\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001588
Jeff Layton3534b852013-05-24 07:41:01 -04001589 if (!server) {
1590 WARN(1, "%s: server is NULL!\n", __func__);
1591 return -EIO;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001592 }
1593
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001594 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1595 if (!sess_data)
1596 return -ENOMEM;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001597
1598 rc = SMB2_select_sec(ses, sess_data);
1599 if (rc)
1600 goto out;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001601 sess_data->xid = xid;
1602 sess_data->ses = ses;
1603 sess_data->buf0_type = CIFS_NO_BUFFER;
1604 sess_data->nls_cp = (struct nls_table *) nls_cp;
Steve Frenchb2adf22f2018-05-31 15:19:25 -05001605 sess_data->previous_session = ses->Suid;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001606
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001607 /*
1608 * Initialize the session hash with the server one.
1609 */
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001610 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001611 SMB2_PREAUTH_HASH_SIZE);
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001612
Sachin Prabhu166cea42016-10-07 19:11:22 +01001613 while (sess_data->func)
1614 sess_data->func(sess_data);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001615
Steve Frenchc721c382017-09-19 18:40:03 -05001616 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001617 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001618 rc = sess_data->result;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001619out:
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001620 kfree(sess_data);
1621 return rc;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001622}
1623
1624int
1625SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
1626{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001627 struct smb_rqst rqst;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001628 struct smb2_logoff_req *req; /* response is also trivial struct */
1629 int rc = 0;
1630 struct TCP_Server_Info *server;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001631 int flags = 0;
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001632 unsigned int total_len;
1633 struct kvec iov[1];
1634 struct kvec rsp_iov;
1635 int resp_buf_type;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001636
Joe Perchesf96637b2013-05-04 22:12:25 -05001637 cifs_dbg(FYI, "disconnect session %p\n", ses);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001638
1639 if (ses && (ses->server))
1640 server = ses->server;
1641 else
1642 return -EIO;
1643
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -05001644 /* no need to send SMB logoff if uid already closed due to reconnect */
1645 if (ses->need_reconnect)
1646 goto smb2_session_already_dead;
1647
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001648 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
1649 (void **) &req, &total_len);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001650 if (rc)
1651 return rc;
1652
1653 /* since no tcon, smb2_init can not do this, so do here */
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001654 req->sync_hdr.SessionId = ses->Suid;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001655
1656 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
1657 flags |= CIFS_TRANSFORM_REQ;
1658 else if (server->sign)
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001659 req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001660
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10001661 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001662
1663 iov[0].iov_base = (char *)req;
1664 iov[0].iov_len = total_len;
1665
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001666 memset(&rqst, 0, sizeof(struct smb_rqst));
1667 rqst.rq_iov = iov;
1668 rqst.rq_nvec = 1;
1669
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001670 rc = cifs_send_recv(xid, ses, ses->server,
1671 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001672 cifs_small_buf_release(req);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001673 /*
1674 * No tcon so can't do
1675 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1676 */
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -05001677
1678smb2_session_already_dead:
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001679 return rc;
1680}
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001681
1682static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
1683{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001684 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001685}
1686
1687#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
1688
Steve Frenchde9f68df2013-11-15 11:26:24 -06001689/* These are similar values to what Windows uses */
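/* i.e. up to 256 chunks per server-side copy request, at most 1MB per chunk and 16MB copied per request */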
1690static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
1691{
1692 tcon->max_chunks = 256;
1693 tcon->max_bytes_chunk = 1048576;
1694 tcon->max_bytes_copy = 16777216;
1695}
1696
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001697int
1698SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1699 struct cifs_tcon *tcon, const struct nls_table *cp)
1700{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001701 struct smb_rqst rqst;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001702 struct smb2_tree_connect_req *req;
1703 struct smb2_tree_connect_rsp *rsp = NULL;
1704 struct kvec iov[2];
Aurélien Apteldb3b5472017-10-11 13:23:36 +02001705 struct kvec rsp_iov = { NULL, 0 };
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001706 int rc = 0;
1707 int resp_buftype;
1708 int unc_path_len;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001709 __le16 *unc_path = NULL;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001710 int flags = 0;
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001711 unsigned int total_len;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001712 struct TCP_Server_Info *server;
1713
1714 /* always use master channel */
1715 server = ses->server;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001716
Joe Perchesf96637b2013-05-04 22:12:25 -05001717 cifs_dbg(FYI, "TCON\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001718
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001719 if (!server || !tree)
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001720 return -EIO;
1721
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001722 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
1723 if (unc_path == NULL)
1724 return -ENOMEM;
1725
1726 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
1727 unc_path_len *= 2;
1728 if (unc_path_len < 2) {
1729 kfree(unc_path);
1730 return -EINVAL;
1731 }
1732
Jan-Marek Glogowski806a28e2017-02-20 12:25:58 +01001733 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
Aurelien Aptelb327a712018-01-24 13:46:10 +01001734 tcon->tid = 0;
Steve Frenchfae80442018-10-19 17:14:32 -05001735 atomic_set(&tcon->num_remote_opens, 0);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001736 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
1737 (void **) &req, &total_len);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001738 if (rc) {
1739 kfree(unc_path);
1740 return rc;
1741 }
1742
Steve French5a77e752018-05-09 17:43:08 -05001743 if (smb3_encryption_required(tcon))
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001744 flags |= CIFS_TRANSFORM_REQ;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001745
1746 iov[0].iov_base = (char *)req;
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001747 /* 1 for pad */
1748 iov[0].iov_len = total_len - 1;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001749
1750 /* Testing shows that buffer offset must be at location of Buffer[0] */
1751 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001752 - 1 /* pad */);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001753 req->PathLength = cpu_to_le16(unc_path_len - 2);
1754 iov[1].iov_base = unc_path;
1755 iov[1].iov_len = unc_path_len;
1756
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001757 /*
1758 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
1759 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
Steve French8c11a602019-03-22 22:31:17 -05001760 * (Samba servers don't always set the flag so also check if null user)
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001761 */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001762 if ((server->dialect == SMB311_PROT_ID) &&
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001763 !smb3_encryption_required(tcon) &&
Steve French8c11a602019-03-22 22:31:17 -05001764 !(ses->session_flags &
1765 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
1766 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
Steve French6188f282018-03-13 02:29:36 -05001767 req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
1768
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001769 memset(&rqst, 0, sizeof(struct smb_rqst));
1770 rqst.rq_iov = iov;
1771 rqst.rq_nvec = 2;
1772
Steve French4fe75c42019-02-14 01:19:02 -06001773 /* Need 64 for max size write so ask for more in case not there yet */
1774 req->sync_hdr.CreditRequest = cpu_to_le16(64);
1775
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001776 rc = cifs_send_recv(xid, ses, server,
1777 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001778 cifs_small_buf_release(req);
1779 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
Steve Frenchf8af49d2018-10-28 00:47:11 -05001780 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001781 if (rc != 0) {
Steve French35591342021-06-19 12:01:37 -05001782 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
1783 tcon->need_reconnect = true;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001784 goto tcon_error_exit;
1785 }
1786
Christophe JAILLETcd123002017-05-12 17:59:32 +02001787 switch (rsp->ShareType) {
1788 case SMB2_SHARE_TYPE_DISK:
Joe Perchesf96637b2013-05-04 22:12:25 -05001789 cifs_dbg(FYI, "connection to disk share\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001790 break;
1791 case SMB2_SHARE_TYPE_PIPE:
Aurelien Aptelb327a712018-01-24 13:46:10 +01001792 tcon->pipe = true;
Joe Perchesf96637b2013-05-04 22:12:25 -05001793 cifs_dbg(FYI, "connection to pipe share\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001794 break;
1795 case SMB2_SHARE_TYPE_PRINT:
Aurelien Aptelb327a712018-01-24 13:46:10 +01001796 tcon->print = true;
Joe Perchesf96637b2013-05-04 22:12:25 -05001797 cifs_dbg(FYI, "connection to printer\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001798 break;
1799 default:
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001800 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001801 rc = -EOPNOTSUPP;
1802 goto tcon_error_exit;
1803 }
1804
1805 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
Steve French769ee6a2013-06-19 14:15:30 -05001806 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001807 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
1808 tcon->tidStatus = CifsGood;
1809 tcon->need_reconnect = false;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001810 tcon->tid = rsp->sync_hdr.TreeId;
Zhao Hongjiang46b51d02013-06-24 01:57:47 -05001811 strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001812
1813 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
1814 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001815 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001816
1817 if (tcon->seal &&
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001818 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001819 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001820
Steve Frenchde9f68df2013-11-15 11:26:24 -06001821 init_copy_chunk_defaults(tcon);
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001822 if (server->ops->validate_negotiate)
1823 rc = server->ops->validate_negotiate(xid, tcon);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001824tcon_exit:
Steve Frenchf8af49d2018-10-28 00:47:11 -05001825
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001826 free_rsp_buf(resp_buftype, rsp);
1827 kfree(unc_path);
1828 return rc;
1829
1830tcon_error_exit:
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001831 if (rsp && rsp->sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001832 cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001833 }
1834 goto tcon_exit;
1835}
1836
1837int
1838SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
1839{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001840 struct smb_rqst rqst;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001841 struct smb2_tree_disconnect_req *req; /* response is trivial */
1842 int rc = 0;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001843 struct cifs_ses *ses = tcon->ses;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001844 int flags = 0;
Ronnie Sahlberg4eecf4c2017-11-09 12:14:18 +11001845 unsigned int total_len;
1846 struct kvec iov[1];
1847 struct kvec rsp_iov;
1848 int resp_buf_type;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001849
Joe Perchesf96637b2013-05-04 22:12:25 -05001850 cifs_dbg(FYI, "Tree Disconnect\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001851
Christos Gkekas68a6afa2017-07-09 11:45:04 +01001852 if (!ses || !(ses->server))
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001853 return -EIO;
1854
1855 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
1856 return 0;
1857
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +10001858 close_cached_dir_lease(&tcon->crfid);
Ronnie Sahlberg72e73c72019-11-07 17:00:38 +10001859
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001860 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
1861 (void **) &req,
1862 &total_len);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001863 if (rc)
1864 return rc;
1865
Steve French5a77e752018-05-09 17:43:08 -05001866 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001867 flags |= CIFS_TRANSFORM_REQ;
1868
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10001869 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg4eecf4c2017-11-09 12:14:18 +11001870
1871 iov[0].iov_base = (char *)req;
1872 iov[0].iov_len = total_len;
1873
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001874 memset(&rqst, 0, sizeof(struct smb_rqst));
1875 rqst.rq_iov = iov;
1876 rqst.rq_nvec = 1;
1877
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001878 rc = cifs_send_recv(xid, ses, ses->server,
1879 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001880 cifs_small_buf_release(req);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001881 if (rc)
1882 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
1883
1884 return rc;
1885}
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001886
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001887
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001888static struct create_durable *
1889create_durable_buf(void)
1890{
1891 struct create_durable *buf;
1892
1893 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1894 if (!buf)
1895 return NULL;
1896
1897 buf->ccontext.DataOffset = cpu_to_le16(offsetof
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001898 (struct create_durable, Data));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001899 buf->ccontext.DataLength = cpu_to_le32(16);
1900 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1901 (struct create_durable, Name));
1902 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07001903 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001904 buf->Name[0] = 'D';
1905 buf->Name[1] = 'H';
1906 buf->Name[2] = 'n';
1907 buf->Name[3] = 'Q';
1908 return buf;
1909}
1910
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001911static struct create_durable *
1912create_reconnect_durable_buf(struct cifs_fid *fid)
1913{
1914 struct create_durable *buf;
1915
1916 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1917 if (!buf)
1918 return NULL;
1919
1920 buf->ccontext.DataOffset = cpu_to_le16(offsetof
1921 (struct create_durable, Data));
1922 buf->ccontext.DataLength = cpu_to_le32(16);
1923 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1924 (struct create_durable, Name));
1925 buf->ccontext.NameLength = cpu_to_le16(4);
1926 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
1927 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
Steve French12197a72014-05-14 05:29:40 -07001928 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001929 buf->Name[0] = 'D';
1930 buf->Name[1] = 'H';
1931 buf->Name[2] = 'n';
1932 buf->Name[3] = 'C';
1933 return buf;
1934}
1935
Steve French89a5bfa2019-07-18 17:22:18 -05001936static void
1937parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
1938{
1939 struct create_on_disk_id *pdisk_id = (struct create_on_disk_id *)cc;
1940
1941 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
1942 pdisk_id->DiskFileId, pdisk_id->VolumeId);
1943 buf->IndexNumber = pdisk_id->DiskFileId;
1944}
1945
Steve Frenchab3459d2020-02-06 17:31:56 -06001946static void
Aurelien Aptel69dda302020-03-02 17:53:22 +01001947parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
1948 struct create_posix_rsp *posix)
Steve Frenchab3459d2020-02-06 17:31:56 -06001949{
Aurelien Aptel69dda302020-03-02 17:53:22 +01001950 int sid_len;
1951 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
1952 u8 *end = beg + le32_to_cpu(cc->DataLength);
1953 u8 *sid;
Steve Frenchab3459d2020-02-06 17:31:56 -06001954
Aurelien Aptel69dda302020-03-02 17:53:22 +01001955 memset(posix, 0, sizeof(*posix));
Aurelien Aptel2e8af972020-02-08 15:50:56 +01001956
Aurelien Aptel69dda302020-03-02 17:53:22 +01001957 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
1958 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
1959 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
1960
1961 sid = beg + 12;
1962 sid_len = posix_info_sid_size(sid, end);
1963 if (sid_len < 0) {
1964 cifs_dbg(VFS, "bad owner sid in posix create response\n");
1965 return;
1966 }
1967 memcpy(&posix->owner, sid, sid_len);
1968
1969 sid = sid + sid_len;
1970 sid_len = posix_info_sid_size(sid, end);
1971 if (sid_len < 0) {
1972 cifs_dbg(VFS, "bad group sid in posix create response\n");
1973 return;
1974 }
1975 memcpy(&posix->group, sid, sid_len);
1976
1977 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
1978 posix->nlink, posix->mode, posix->reparse_tag);
Steve Frenchab3459d2020-02-06 17:31:56 -06001979}
1980
Steve French89a5bfa2019-07-18 17:22:18 -05001981void
1982smb2_parse_contexts(struct TCP_Server_Info *server,
Aurelien Aptel69dda302020-03-02 17:53:22 +01001983 struct smb2_create_rsp *rsp,
1984 unsigned int *epoch, char *lease_key, __u8 *oplock,
1985 struct smb2_file_all_info *buf,
1986 struct create_posix_rsp *posix)
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001987{
1988 char *data_offset;
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04001989 struct create_context *cc;
Justin Maggarddeb7def2016-02-09 15:52:08 -08001990 unsigned int next;
1991 unsigned int remaining;
Pavel Shilovskyfd554392013-07-09 19:44:56 +04001992 char *name;
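	/* 16-byte GUID naming the SMB3.1.1 POSIX extensions create context */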
Colin Ian King3ece60e2020-10-20 15:19:36 +01001993 static const char smb3_create_tag_posix[] = {
1994 0x93, 0xAD, 0x25, 0x50, 0x9C,
1995 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
1996 0xDE, 0x96, 0x8B, 0xCD, 0x7C
1997 };
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001998
Steve French89a5bfa2019-07-18 17:22:18 -05001999 *oplock = 0;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002000 data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
Justin Maggarddeb7def2016-02-09 15:52:08 -08002001 remaining = le32_to_cpu(rsp->CreateContextsLength);
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002002 cc = (struct create_context *)data_offset;
Steve French89a5bfa2019-07-18 17:22:18 -05002003
2004 /* Initialize inode number to 0 in case no valid data in qfid context */
2005 if (buf)
2006 buf->IndexNumber = 0;
2007
Justin Maggarddeb7def2016-02-09 15:52:08 -08002008 while (remaining >= sizeof(struct create_context)) {
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002009 name = le16_to_cpu(cc->NameOffset) + (char *)cc;
Justin Maggarddeb7def2016-02-09 15:52:08 -08002010 if (le16_to_cpu(cc->NameLength) == 4 &&
Steve French89a5bfa2019-07-18 17:22:18 -05002011 strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
2012 *oplock = server->ops->parse_lease_buf(cc, epoch,
2013 lease_key);
2014 else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
2015 strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
2016 parse_query_id_ctxt(cc, buf);
Steve Frenchab3459d2020-02-06 17:31:56 -06002017 else if ((le16_to_cpu(cc->NameLength) == 16)) {
Aurelien Aptel69dda302020-03-02 17:53:22 +01002018 if (posix &&
2019 memcmp(name, smb3_create_tag_posix, 16) == 0)
2020 parse_posix_ctxt(cc, buf, posix);
Steve Frenchab3459d2020-02-06 17:31:56 -06002021 }
2022 /* else {
2023 cifs_dbg(FYI, "Context not matched with len %d\n",
2024 le16_to_cpu(cc->NameLength));
2025 cifs_dump_mem("Cctxt name: ", name, 4);
2026 } */
Justin Maggarddeb7def2016-02-09 15:52:08 -08002027
2028 next = le32_to_cpu(cc->Next);
2029 if (!next)
2030 break;
2031 remaining -= next;
2032 cc = (struct create_context *)((char *)cc + next);
2033 }
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002034
Steve French89a5bfa2019-07-18 17:22:18 -05002035 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2036 *oplock = rsp->OplockLevel;
2037
2038 return;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002039}
2040
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002041static int
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002042add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
Stefano Brivio729c0c92018-07-05 15:10:02 +02002043 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002044{
2045 struct smb2_create_req *req = iov[0].iov_base;
2046 unsigned int num = *num_iovec;
2047
Stefano Brivio729c0c92018-07-05 15:10:02 +02002048 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002049 if (iov[num].iov_base == NULL)
2050 return -ENOMEM;
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002051 iov[num].iov_len = server->vals->create_lease_size;
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002052 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2053 if (!req->CreateContextsOffset)
2054 req->CreateContextsOffset = cpu_to_le32(
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002055 sizeof(struct smb2_create_req) +
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002056 iov[num - 1].iov_len);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002057 le32_add_cpu(&req->CreateContextsLength,
2058 server->vals->create_lease_size);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002059 *num_iovec = num + 1;
2060 return 0;
2061}
2062
Steve Frenchb56eae42015-11-03 09:26:27 -06002063static struct create_durable_v2 *
Steve Frenchca567eb2019-03-29 16:31:07 -05002064create_durable_v2_buf(struct cifs_open_parms *oparms)
Steve Frenchb56eae42015-11-03 09:26:27 -06002065{
Steve Frenchca567eb2019-03-29 16:31:07 -05002066 struct cifs_fid *pfid = oparms->fid;
Steve Frenchb56eae42015-11-03 09:26:27 -06002067 struct create_durable_v2 *buf;
2068
2069 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
2070 if (!buf)
2071 return NULL;
2072
2073 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2074 (struct create_durable_v2, dcontext));
2075 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
2076 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2077 (struct create_durable_v2, Name));
2078 buf->ccontext.NameLength = cpu_to_le16(4);
2079
Steve Frenchca567eb2019-03-29 16:31:07 -05002080 /*
2081 * NB: Handle timeout defaults to 0, which allows server to choose
2082 * (most servers default to 120 seconds) and most clients default to 0.
2083 * This can be overridden at mount ("handletimeout=") if the user wants
2084 * a different persistent (or resilient) handle timeout for all opens
2085	 * on a particular SMB3 mount.
2086 */
2087 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
Steve Frenchb56eae42015-11-03 09:26:27 -06002088 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
Steve Frenchfa70b872016-09-22 00:39:34 -05002089 generate_random_uuid(buf->dcontext.CreateGuid);
Steve Frenchb56eae42015-11-03 09:26:27 -06002090 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2091
2092 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
2093 buf->Name[0] = 'D';
2094 buf->Name[1] = 'H';
2095 buf->Name[2] = '2';
2096 buf->Name[3] = 'Q';
2097 return buf;
2098}
2099
2100static struct create_durable_handle_reconnect_v2 *
2101create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2102{
2103 struct create_durable_handle_reconnect_v2 *buf;
2104
2105 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2106 GFP_KERNEL);
2107 if (!buf)
2108 return NULL;
2109
2110 buf->ccontext.DataOffset =
2111 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2112 dcontext));
2113 buf->ccontext.DataLength =
2114 cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2115 buf->ccontext.NameOffset =
2116 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2117 Name));
2118 buf->ccontext.NameLength = cpu_to_le16(4);
2119
2120 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2121 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2122 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2123 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2124
2125 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2126 buf->Name[0] = 'D';
2127 buf->Name[1] = 'H';
2128 buf->Name[2] = '2';
2129 buf->Name[3] = 'C';
2130 return buf;
2131}
2132
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002133static int
Steve Frenchb56eae42015-11-03 09:26:27 -06002134add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002135 struct cifs_open_parms *oparms)
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002136{
2137 struct smb2_create_req *req = iov[0].iov_base;
2138 unsigned int num = *num_iovec;
2139
Steve Frenchca567eb2019-03-29 16:31:07 -05002140 iov[num].iov_base = create_durable_v2_buf(oparms);
Steve Frenchb56eae42015-11-03 09:26:27 -06002141 if (iov[num].iov_base == NULL)
2142 return -ENOMEM;
2143 iov[num].iov_len = sizeof(struct create_durable_v2);
2144 if (!req->CreateContextsOffset)
2145 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002146 cpu_to_le32(sizeof(struct smb2_create_req) +
Steve Frenchb56eae42015-11-03 09:26:27 -06002147 iov[1].iov_len);
2148 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
Steve Frenchb56eae42015-11-03 09:26:27 -06002149 *num_iovec = num + 1;
2150 return 0;
2151}
2152
2153static int
2154add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2155 struct cifs_open_parms *oparms)
2156{
2157 struct smb2_create_req *req = iov[0].iov_base;
2158 unsigned int num = *num_iovec;
2159
2160 /* indicate that we don't need to relock the file */
2161 oparms->reconnect = false;
2162
2163 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2164 if (iov[num].iov_base == NULL)
2165 return -ENOMEM;
2166 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2167 if (!req->CreateContextsOffset)
2168 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002169 cpu_to_le32(sizeof(struct smb2_create_req) +
Steve Frenchb56eae42015-11-03 09:26:27 -06002170 iov[1].iov_len);
2171 le32_add_cpu(&req->CreateContextsLength,
2172 sizeof(struct create_durable_handle_reconnect_v2));
Steve Frenchb56eae42015-11-03 09:26:27 -06002173 *num_iovec = num + 1;
2174 return 0;
2175}
2176
2177static int
2178add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2179 struct cifs_open_parms *oparms, bool use_persistent)
2180{
2181 struct smb2_create_req *req = iov[0].iov_base;
2182 unsigned int num = *num_iovec;
2183
2184 if (use_persistent) {
2185 if (oparms->reconnect)
2186 return add_durable_reconnect_v2_context(iov, num_iovec,
2187 oparms);
2188 else
2189 return add_durable_v2_context(iov, num_iovec, oparms);
2190 }
2191
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002192 if (oparms->reconnect) {
2193 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2194 /* indicate that we don't need to relock the file */
2195 oparms->reconnect = false;
2196 } else
2197 iov[num].iov_base = create_durable_buf();
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002198 if (iov[num].iov_base == NULL)
2199 return -ENOMEM;
2200 iov[num].iov_len = sizeof(struct create_durable);
2201 if (!req->CreateContextsOffset)
2202 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002203 cpu_to_le32(sizeof(struct smb2_create_req) +
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002204 iov[1].iov_len);
Wei Yongjun31f92e92013-08-26 14:34:46 +08002205 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002206 *num_iovec = num + 1;
2207 return 0;
2208}
2209
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002210/* See MS-SMB2 2.2.13.2.7 */
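/*
 * Timewarp (TWrp) create context asks the server to open the file as it
 * existed at the given timestamp; typically set from the snapshot= mount option.
 */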
2211static struct crt_twarp_ctxt *
2212create_twarp_buf(__u64 timewarp)
2213{
2214 struct crt_twarp_ctxt *buf;
2215
2216 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2217 if (!buf)
2218 return NULL;
2219
2220 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2221 (struct crt_twarp_ctxt, Timestamp));
2222 buf->ccontext.DataLength = cpu_to_le32(8);
2223 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2224 (struct crt_twarp_ctxt, Name));
2225 buf->ccontext.NameLength = cpu_to_le16(4);
2226 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2227 buf->Name[0] = 'T';
2228 buf->Name[1] = 'W';
2229 buf->Name[2] = 'r';
2230 buf->Name[3] = 'p';
2231 buf->Timestamp = cpu_to_le64(timewarp);
2232 return buf;
2233}
2234
2235/* See MS-SMB2 2.2.13.2.7 */
2236static int
2237add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2238{
2239 struct smb2_create_req *req = iov[0].iov_base;
2240 unsigned int num = *num_iovec;
2241
2242 iov[num].iov_base = create_twarp_buf(timewarp);
2243 if (iov[num].iov_base == NULL)
2244 return -ENOMEM;
2245 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2246 if (!req->CreateContextsOffset)
2247 req->CreateContextsOffset = cpu_to_le32(
2248 sizeof(struct smb2_create_req) +
2249 iov[num - 1].iov_len);
2250 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
2251 *num_iovec = num + 1;
2252 return 0;
2253}
2254
Steve French975221e2020-06-12 09:25:21 -05002255/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
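/*
 * S-1-5-88-1-<uid> and S-1-5-88-2-<gid> are the special identity SIDs from
 * the article above; a server that understands the scheme can map them back
 * to POSIX owner and group ids (the mode is carried in a similar special SID,
 * S-1-5-88-3-<mode>, via setup_special_mode_ACE() in create_sd_buf() below).
 */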
2256static void setup_owner_group_sids(char *buf)
2257{
2258 struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2259
2260 /* Populate the user ownership fields S-1-5-88-1 */
2261 sids->owner.Revision = 1;
2262 sids->owner.NumAuth = 3;
2263 sids->owner.Authority[5] = 5;
2264 sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2265 sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2266 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2267
2268 /* Populate the group ownership fields S-1-5-88-2 */
2269 sids->group.Revision = 1;
2270 sids->group.NumAuth = 3;
2271 sids->group.Authority[5] = 5;
2272 sids->group.SubAuthorities[0] = cpu_to_le32(88);
2273 sids->group.SubAuthorities[1] = cpu_to_le32(2);
2274 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
Steve Frencha7a519a2020-06-12 14:49:47 -05002275
2276 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
Steve French975221e2020-06-12 09:25:21 -05002277}
2278
Steve Frenchfdef6652019-12-06 02:02:38 -06002279/* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
2280static struct crt_sd_ctxt *
Steve French975221e2020-06-12 09:25:21 -05002281create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
Steve Frenchfdef6652019-12-06 02:02:38 -06002282{
2283 struct crt_sd_ctxt *buf;
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002284 __u8 *ptr, *aclptr;
2285 unsigned int acelen, acl_size, ace_count;
Steve French975221e2020-06-12 09:25:21 -05002286 unsigned int owner_offset = 0;
2287 unsigned int group_offset = 0;
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002288 struct smb3_acl acl;
Steve Frenchfdef6652019-12-06 02:02:38 -06002289
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002290 *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
Steve French975221e2020-06-12 09:25:21 -05002291
2292 if (set_owner) {
Steve French975221e2020-06-12 09:25:21 -05002293		/* sizeof(struct owner_group_sids) is already a multiple of 8, so no need to round */
2294 *len += sizeof(struct owner_group_sids);
2295 }
2296
Steve Frenchfdef6652019-12-06 02:02:38 -06002297 buf = kzalloc(*len, GFP_KERNEL);
2298 if (buf == NULL)
2299 return buf;
2300
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002301 ptr = (__u8 *)&buf[1];
Steve French975221e2020-06-12 09:25:21 -05002302 if (set_owner) {
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002303 /* offset fields are from beginning of security descriptor not of create context */
2304 owner_offset = ptr - (__u8 *)&buf->sd;
Steve French975221e2020-06-12 09:25:21 -05002305 buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002306 group_offset = owner_offset + offsetof(struct owner_group_sids, group);
Steve French975221e2020-06-12 09:25:21 -05002307 buf->sd.OffsetGroup = cpu_to_le32(group_offset);
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002308
2309 setup_owner_group_sids(ptr);
2310 ptr += sizeof(struct owner_group_sids);
Steve French975221e2020-06-12 09:25:21 -05002311 } else {
2312 buf->sd.OffsetOwner = 0;
2313 buf->sd.OffsetGroup = 0;
2314 }
2315
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002316 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
Steve French975221e2020-06-12 09:25:21 -05002317 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
Steve Frenchfdef6652019-12-06 02:02:38 -06002318 buf->ccontext.NameLength = cpu_to_le16(4);
2319 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2320 buf->Name[0] = 'S';
2321 buf->Name[1] = 'e';
2322 buf->Name[2] = 'c';
2323 buf->Name[3] = 'D';
2324 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002325
Steve Frenchfdef6652019-12-06 02:02:38 -06002326 /*
2327 * ACL is "self relative" ie ACL is stored in contiguous block of memory
2328 * and "DP" ie the DACL is present
2329 */
2330 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2331
2332 /* offset owner, group and Sbz1 and SACL are all zero */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002333 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2334	/* Skip over the ACL for now; we will copy it into buf later. */
2335 aclptr = ptr;
2336 ptr += sizeof(struct cifs_acl);
Steve Frenchfdef6652019-12-06 02:02:38 -06002337
2338 /* create one ACE to hold the mode embedded in reserved special SID */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002339 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
2340 ptr += acelen;
2341 acl_size = acelen + sizeof(struct smb3_acl);
2342 ace_count = 1;
Steve French975221e2020-06-12 09:25:21 -05002343
2344 if (set_owner) {
2345		/* no need to reallocate the buffer to add the two extra ACEs; plenty of space */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002346 acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
2347 ptr += acelen;
2348 acl_size += acelen;
2349 ace_count += 1;
2350 }
Steve French975221e2020-06-12 09:25:21 -05002351
Steve French643fbce2020-01-16 19:55:33 -06002352 /* and one more ACE to allow access for authenticated users */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002353 acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
2354 ptr += acelen;
2355 acl_size += acelen;
2356 ace_count += 1;
Steve French975221e2020-06-12 09:25:21 -05002357
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002358 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2359 acl.AclSize = cpu_to_le16(acl_size);
2360 acl.AceCount = cpu_to_le16(ace_count);
2361 memcpy(aclptr, &acl, sizeof(struct cifs_acl));
2362
2363 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2364 *len = ptr - (__u8 *)buf;
Steve French975221e2020-06-12 09:25:21 -05002365
Steve Frenchfdef6652019-12-06 02:02:38 -06002366 return buf;
2367}
2368
2369static int
Steve French975221e2020-06-12 09:25:21 -05002370add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
Steve Frenchfdef6652019-12-06 02:02:38 -06002371{
2372 struct smb2_create_req *req = iov[0].iov_base;
2373 unsigned int num = *num_iovec;
2374 unsigned int len = 0;
2375
Steve French975221e2020-06-12 09:25:21 -05002376 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
Steve Frenchfdef6652019-12-06 02:02:38 -06002377 if (iov[num].iov_base == NULL)
2378 return -ENOMEM;
2379 iov[num].iov_len = len;
2380 if (!req->CreateContextsOffset)
2381 req->CreateContextsOffset = cpu_to_le32(
2382 sizeof(struct smb2_create_req) +
2383 iov[num - 1].iov_len);
2384 le32_add_cpu(&req->CreateContextsLength, len);
2385 *num_iovec = num + 1;
2386 return 0;
2387}
2388
Steve Frenchff2a09e2019-07-06 14:41:38 -05002389static struct crt_query_id_ctxt *
2390create_query_id_buf(void)
2391{
2392 struct crt_query_id_ctxt *buf;
2393
2394 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2395 if (!buf)
2396 return NULL;
2397
2398 buf->ccontext.DataOffset = cpu_to_le16(0);
2399 buf->ccontext.DataLength = cpu_to_le32(0);
2400 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2401 (struct crt_query_id_ctxt, Name));
2402 buf->ccontext.NameLength = cpu_to_le16(4);
2403 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2404 buf->Name[0] = 'Q';
2405 buf->Name[1] = 'F';
2406 buf->Name[2] = 'i';
2407 buf->Name[3] = 'd';
2408 return buf;
2409}
2410
2411/* See MS-SMB2 2.2.13.2.9 */
2412static int
2413add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2414{
2415 struct smb2_create_req *req = iov[0].iov_base;
2416 unsigned int num = *num_iovec;
2417
2418 iov[num].iov_base = create_query_id_buf();
2419 if (iov[num].iov_base == NULL)
2420 return -ENOMEM;
2421 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2422 if (!req->CreateContextsOffset)
2423 req->CreateContextsOffset = cpu_to_le32(
2424 sizeof(struct smb2_create_req) +
2425 iov[num - 1].iov_len);
2426 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_query_id_ctxt));
2427 *num_iovec = num + 1;
2428 return 0;
2429}
2430
Aurelien Aptelf0712922017-02-22 14:47:17 +01002431static int
2432alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2433 const char *treename, const __le16 *path)
2434{
2435 int treename_len, path_len;
2436 struct nls_table *cp;
2437 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2438
2439 /*
2440 * skip leading "\\"
2441 */
2442 treename_len = strlen(treename);
2443 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2444 return -EINVAL;
2445
2446 treename += 2;
2447 treename_len -= 2;
2448
2449 path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2450
2451 /*
2452 * make room for one path separator between the treename and
2453 * path
2454 */
2455 *out_len = treename_len + 1 + path_len;
2456
2457 /*
2458 * final path needs to be null-terminated UTF16 with a
2459 * size aligned to 8
2460 */
2461
2462 *out_size = roundup((*out_len+1)*2, 8);
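	/*
	 * e.g. (hypothetical names) a treename of "\\srv\share" (9 code
	 * units after skipping the leading "\\") plus the separator and a
	 * 7-unit path gives *out_len = 9 + 1 + 7 = 17, so
	 * *out_size = roundup((17 + 1) * 2, 8) = 40 bytes.
	 */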
2463 *out_path = kzalloc(*out_size, GFP_KERNEL);
2464 if (!*out_path)
2465 return -ENOMEM;
2466
2467 cp = load_nls_default();
2468 cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2469 UniStrcat(*out_path, sep);
2470 UniStrcat(*out_path, path);
2471 unload_nls(cp);
2472
2473 return 0;
2474}
2475
Steve Frenchbea851b2018-06-14 21:56:32 -05002476int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2477 umode_t mode, struct cifs_tcon *tcon,
2478 const char *full_path,
2479 struct cifs_sb_info *cifs_sb)
2480{
2481 struct smb_rqst rqst;
2482 struct smb2_create_req *req;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002483 struct smb2_create_rsp *rsp = NULL;
Steve Frenchbea851b2018-06-14 21:56:32 -05002484 struct cifs_ses *ses = tcon->ses;
2485 struct kvec iov[3]; /* make sure at least one for each open context */
2486 struct kvec rsp_iov = {NULL, 0};
2487 int resp_buftype;
2488 int uni_path_len;
2489 __le16 *copy_path = NULL;
2490 int copy_size;
2491 int rc = 0;
2492 unsigned int n_iov = 2;
2493 __u32 file_attributes = 0;
2494 char *pc_buf = NULL;
2495 int flags = 0;
2496 unsigned int total_len;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002497 __le16 *utf16_path = NULL;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002498 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve Frenchbea851b2018-06-14 21:56:32 -05002499
2500 cifs_dbg(FYI, "mkdir\n");
2501
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002502 /* resource #1: path allocation */
2503 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2504 if (!utf16_path)
2505 return -ENOMEM;
2506
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002507 if (!ses || !server) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002508 rc = -EIO;
2509 goto err_free_path;
2510 }
Steve Frenchbea851b2018-06-14 21:56:32 -05002511
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002512 /* resource #2: request */
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002513 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2514 (void **) &req, &total_len);
Steve Frenchbea851b2018-06-14 21:56:32 -05002515 if (rc)
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002516 goto err_free_path;
2517
Steve Frenchbea851b2018-06-14 21:56:32 -05002518
2519 if (smb3_encryption_required(tcon))
2520 flags |= CIFS_TRANSFORM_REQ;
2521
Steve Frenchbea851b2018-06-14 21:56:32 -05002522 req->ImpersonationLevel = IL_IMPERSONATION;
2523 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2524 /* File attributes ignored on open (used in create though) */
2525 req->FileAttributes = cpu_to_le32(file_attributes);
2526 req->ShareAccess = FILE_SHARE_ALL_LE;
2527 req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2528 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2529
2530 iov[0].iov_base = (char *)req;
2531 /* -1 since last byte is buf[0] which is sent below (path) */
2532 iov[0].iov_len = total_len - 1;
2533
2534 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2535
2536 /* [MS-SMB2] 2.2.13 NameOffset:
2537 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2538 * the SMB2 header, the file name includes a prefix that will
2539 * be processed during DFS name normalization as specified in
2540 * section 3.3.5.9. Otherwise, the file name is relative to
2541 * the share that is identified by the TreeId in the SMB2
2542 * header.
2543 */
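	/*
	 * With the DFS flag set, the name sent on the wire is the
	 * "<server>\<share>\<path>" form built by
	 * alloc_path_with_tree_prefix() below: the tree name without its
	 * leading "\\", a single separator, then the path.
	 */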
2544 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2545 int name_len;
2546
2547 req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2548 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2549 &name_len,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002550 tcon->treeName, utf16_path);
2551 if (rc)
2552 goto err_free_req;
2553
Steve Frenchbea851b2018-06-14 21:56:32 -05002554 req->NameLength = cpu_to_le16(name_len * 2);
2555 uni_path_len = copy_size;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002556 /* free before overwriting resource */
2557 kfree(utf16_path);
2558 utf16_path = copy_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002559 } else {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002560 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
Steve Frenchbea851b2018-06-14 21:56:32 -05002561		/* MUST set path len (NameLength) to 0 when opening the root of a share */
2562 req->NameLength = cpu_to_le16(uni_path_len - 2);
2563 if (uni_path_len % 8 != 0) {
2564 copy_size = roundup(uni_path_len, 8);
2565 copy_path = kzalloc(copy_size, GFP_KERNEL);
2566 if (!copy_path) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002567 rc = -ENOMEM;
2568 goto err_free_req;
Steve Frenchbea851b2018-06-14 21:56:32 -05002569 }
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002570 memcpy((char *)copy_path, (const char *)utf16_path,
Steve Frenchbea851b2018-06-14 21:56:32 -05002571 uni_path_len);
2572 uni_path_len = copy_size;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002573 /* free before overwriting resource */
2574 kfree(utf16_path);
2575 utf16_path = copy_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002576 }
2577 }
2578
2579 iov[1].iov_len = uni_path_len;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002580 iov[1].iov_base = utf16_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002581 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2582
2583 if (tcon->posix_extensions) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002584 /* resource #3: posix buf */
Steve Frenchbea851b2018-06-14 21:56:32 -05002585 rc = add_posix_context(iov, &n_iov, mode);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002586 if (rc)
2587 goto err_free_req;
Steve Frenchbea851b2018-06-14 21:56:32 -05002588 pc_buf = iov[n_iov-1].iov_base;
2589 }
2590
2591
2592 memset(&rqst, 0, sizeof(struct smb_rqst));
2593 rqst.rq_iov = iov;
2594 rqst.rq_nvec = n_iov;
2595
Steve Frenchd2f15422019-09-22 00:55:46 -05002596 /* no need to inc num_remote_opens because we close it just below */
Steve Frenchefe2e9f2019-02-26 19:08:12 -06002597 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
2598 FILE_WRITE_ATTRIBUTES);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002599 /* resource #4: response buffer */
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002600 rc = cifs_send_recv(xid, ses, server,
2601 &rqst, &resp_buftype, flags, &rsp_iov);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002602 if (rc) {
Steve Frenchbea851b2018-06-14 21:56:32 -05002603 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2604 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002605 CREATE_NOT_FILE,
2606 FILE_WRITE_ATTRIBUTES, rc);
2607 goto err_free_rsp_buf;
2608 }
2609
2610 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
2611 trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
2612 ses->Suid, CREATE_NOT_FILE,
2613 FILE_WRITE_ATTRIBUTES);
Steve Frenchbea851b2018-06-14 21:56:32 -05002614
2615 SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
2616
2617	/* Eventually save off posix specific response info and timestamps */
2618
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002619err_free_rsp_buf:
Steve Frenchbea851b2018-06-14 21:56:32 -05002620 free_rsp_buf(resp_buftype, rsp);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002621 kfree(pc_buf);
2622err_free_req:
2623 cifs_small_buf_release(req);
2624err_free_path:
2625 kfree(utf16_path);
Steve Frenchbea851b2018-06-14 21:56:32 -05002626 return rc;
Steve Frenchbea851b2018-06-14 21:56:32 -05002627}
Steve Frenchbea851b2018-06-14 21:56:32 -05002628
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002629int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002630SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2631 struct smb_rqst *rqst, __u8 *oplock,
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002632 struct cifs_open_parms *oparms, __le16 *path)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002633{
2634 struct smb2_create_req *req;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002635 unsigned int n_iov = 2;
Pavel Shilovskyca819832013-07-05 12:21:26 +04002636 __u32 file_attributes = 0;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002637 int copy_size;
2638 int uni_path_len;
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002639 unsigned int total_len;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002640 struct kvec *iov = rqst->rq_iov;
2641 __le16 *copy_path;
2642 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002643
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002644 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2645 (void **) &req, &total_len);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002646 if (rc)
2647 return rc;
2648
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002649 iov[0].iov_base = (char *)req;
2650 /* -1 since last byte is buf[0] which is sent below (path) */
2651 iov[0].iov_len = total_len - 1;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07002652
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002653 if (oparms->create_options & CREATE_OPTION_READONLY)
Pavel Shilovskyca819832013-07-05 12:21:26 +04002654 file_attributes |= ATTR_READONLY;
Steve Frenchdb8b6312014-09-22 05:13:55 -05002655 if (oparms->create_options & CREATE_OPTION_SPECIAL)
2656 file_attributes |= ATTR_SYSTEM;
Pavel Shilovskyca819832013-07-05 12:21:26 +04002657
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002658 req->ImpersonationLevel = IL_IMPERSONATION;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002659 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002660 /* File attributes ignored on open (used in create though) */
2661 req->FileAttributes = cpu_to_le32(file_attributes);
2662 req->ShareAccess = FILE_SHARE_ALL_LE;
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002663
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002664 req->CreateDisposition = cpu_to_le32(oparms->disposition);
2665 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002666 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
Aurelien Aptelf0712922017-02-22 14:47:17 +01002667
2668 /* [MS-SMB2] 2.2.13 NameOffset:
2669 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2670 * the SMB2 header, the file name includes a prefix that will
2671 * be processed during DFS name normalization as specified in
2672 * section 3.3.5.9. Otherwise, the file name is relative to
2673 * the share that is identified by the TreeId in the SMB2
2674 * header.
2675 */
2676 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2677 int name_len;
2678
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002679 req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
Aurelien Aptelf0712922017-02-22 14:47:17 +01002680 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2681 &name_len,
2682 tcon->treeName, path);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002683 if (rc)
Aurelien Aptelf0712922017-02-22 14:47:17 +01002684 return rc;
2685 req->NameLength = cpu_to_le16(name_len * 2);
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002686 uni_path_len = copy_size;
2687 path = copy_path;
Aurelien Aptelf0712922017-02-22 14:47:17 +01002688 } else {
2689 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
2690		/* MUST set path len (NameLength) to 0 when opening the root of a share */
2691 req->NameLength = cpu_to_le16(uni_path_len - 2);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002692 copy_size = uni_path_len;
2693 if (copy_size % 8 != 0)
2694 copy_size = roundup(copy_size, 8);
2695 copy_path = kzalloc(copy_size, GFP_KERNEL);
2696 if (!copy_path)
2697 return -ENOMEM;
2698 memcpy((char *)copy_path, (const char *)path,
2699 uni_path_len);
2700 uni_path_len = copy_size;
2701 path = copy_path;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002702 }
2703
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002704 iov[1].iov_len = uni_path_len;
2705 iov[1].iov_base = path;
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002706
Steve French3e7a02d2019-09-11 21:46:20 -05002707 if ((!server->oplocks) || (tcon->no_lease))
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002708 *oplock = SMB2_OPLOCK_LEVEL_NONE;
2709
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002710 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002711 *oplock == SMB2_OPLOCK_LEVEL_NONE)
2712 req->RequestedOplockLevel = *oplock;
Steve Frenchf8015682018-08-31 15:12:10 -05002713 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2714 (oparms->create_options & CREATE_NOT_FILE))
2715 req->RequestedOplockLevel = *oplock; /* no srv lease support */
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002716 else {
Stefano Brivio729c0c92018-07-05 15:10:02 +02002717 rc = add_lease_context(server, iov, &n_iov,
2718 oparms->fid->lease_key, oplock);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002719 if (rc)
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002720 return rc;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002721 }
2722
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002723 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
2724 /* need to set Next field of lease context if we request it */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002725 if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002726 struct create_context *ccontext =
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002727 (struct create_context *)iov[n_iov-1].iov_base;
Steve French1c469432013-07-10 12:50:57 -05002728 ccontext->Next =
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002729 cpu_to_le32(server->vals->create_lease_size);
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002730 }
Steve Frenchb56eae42015-11-03 09:26:27 -06002731
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002732 rc = add_durable_context(iov, &n_iov, oparms,
Steve Frenchb56eae42015-11-03 09:26:27 -06002733 tcon->use_persistent);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002734 if (rc)
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002735 return rc;
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002736 }
2737
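	/*
	 * Each optional create context appended below is chained to the
	 * previous one by setting the previous context's Next field to that
	 * context's length; the last context added leaves Next as 0.
	 */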
Steve Frenchce558b02018-05-31 19:16:54 -05002738 if (tcon->posix_extensions) {
2739 if (n_iov > 2) {
2740 struct create_context *ccontext =
2741 (struct create_context *)iov[n_iov-1].iov_base;
2742 ccontext->Next =
2743 cpu_to_le32(iov[n_iov-1].iov_len);
2744 }
2745
2746 rc = add_posix_context(iov, &n_iov, oparms->mode);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002747 if (rc)
Steve Frenchce558b02018-05-31 19:16:54 -05002748 return rc;
Steve Frenchce558b02018-05-31 19:16:54 -05002749 }
Steve Frenchce558b02018-05-31 19:16:54 -05002750
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002751 if (tcon->snapshot_time) {
2752 cifs_dbg(FYI, "adding snapshot context\n");
2753 if (n_iov > 2) {
2754 struct create_context *ccontext =
2755 (struct create_context *)iov[n_iov-1].iov_base;
2756 ccontext->Next =
2757 cpu_to_le32(iov[n_iov-1].iov_len);
2758 }
2759
2760 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
2761 if (rc)
2762 return rc;
2763 }
2764
Steve French975221e2020-06-12 09:25:21 -05002765 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
2766 bool set_mode;
2767 bool set_owner;
2768
2769 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
2770 (oparms->mode != ACL_NO_MODE))
2771 set_mode = true;
2772 else {
2773 set_mode = false;
2774 oparms->mode = ACL_NO_MODE;
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002775 }
2776
Steve French975221e2020-06-12 09:25:21 -05002777 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
2778 set_owner = true;
2779 else
2780 set_owner = false;
2781
2782 if (set_owner | set_mode) {
2783 if (n_iov > 2) {
2784 struct create_context *ccontext =
2785 (struct create_context *)iov[n_iov-1].iov_base;
2786 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2787 }
2788
2789 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
2790 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
2791 if (rc)
2792 return rc;
2793 }
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002794 }
2795
Steve Frenchff2a09e2019-07-06 14:41:38 -05002796 if (n_iov > 2) {
2797 struct create_context *ccontext =
2798 (struct create_context *)iov[n_iov-1].iov_base;
2799 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2800 }
2801 add_query_id_context(iov, &n_iov);
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002802
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002803 rqst->rq_nvec = n_iov;
2804 return 0;
2805}
2806
2807/* rq_iov[0] is the request and is released by cifs_small_buf_release().
2808 * All other vectors are freed by kfree().
2809 */
2810void
2811SMB2_open_free(struct smb_rqst *rqst)
2812{
2813 int i;
2814
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10002815 if (rqst && rqst->rq_iov) {
2816 cifs_small_buf_release(rqst->rq_iov[0].iov_base);
2817 for (i = 1; i < rqst->rq_nvec; i++)
2818 if (rqst->rq_iov[i].iov_base != smb2_padding)
2819 kfree(rqst->rq_iov[i].iov_base);
2820 }
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002821}
2822
2823int
2824SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2825 __u8 *oplock, struct smb2_file_all_info *buf,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002826 struct create_posix_rsp *posix,
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002827 struct kvec *err_iov, int *buftype)
2828{
2829 struct smb_rqst rqst;
2830 struct smb2_create_rsp *rsp = NULL;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002831 struct cifs_tcon *tcon = oparms->tcon;
2832 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002833 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002834 struct kvec iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002835 struct kvec rsp_iov = {NULL, 0};
Garry McNultyef2298a2018-10-03 20:51:21 +01002836 int resp_buftype = CIFS_NO_BUFFER;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002837 int rc = 0;
2838 int flags = 0;
2839
2840 cifs_dbg(FYI, "create/open\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002841 if (!ses || !server)
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002842 return -EIO;
2843
2844 if (smb3_encryption_required(tcon))
2845 flags |= CIFS_TRANSFORM_REQ;
2846
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002847 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002848 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002849 rqst.rq_iov = iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002850 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002851
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002852 rc = SMB2_open_init(tcon, server,
2853 &rqst, oplock, oparms, path);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002854 if (rc)
2855 goto creat_exit;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002856
Steve Frenchefe2e9f2019-02-26 19:08:12 -06002857 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
2858 oparms->create_options, oparms->desired_access);
2859
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002860 rc = cifs_send_recv(xid, ses, server,
2861 &rqst, &resp_buftype, flags,
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002862 &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002863 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002864
2865 if (rc != 0) {
2866 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002867 if (err_iov && rsp) {
2868 *err_iov = rsp_iov;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002869 *buftype = resp_buftype;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002870 resp_buftype = CIFS_NO_BUFFER;
2871 rsp = NULL;
2872 }
Steve French28d59362018-05-30 21:42:34 -05002873 trace_smb3_open_err(xid, tcon->tid, ses->Suid,
2874 oparms->create_options, oparms->desired_access, rc);
Steve French7dcc82c2019-09-11 00:07:36 -05002875 if (rc == -EREMCHG) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002876 pr_warn_once("server share %s deleted\n",
2877 tcon->treeName);
Steve French7dcc82c2019-09-11 00:07:36 -05002878 tcon->need_reconnect = true;
2879 }
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002880 goto creat_exit;
Steve French28d59362018-05-30 21:42:34 -05002881 } else
2882 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid,
2883 ses->Suid, oparms->create_options,
2884 oparms->desired_access);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002885
Steve Frenchfae80442018-10-19 17:14:32 -05002886 atomic_inc(&tcon->num_remote_opens);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002887 oparms->fid->persistent_fid = rsp->PersistentFileId;
2888 oparms->fid->volatile_fid = rsp->VolatileFileId;
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002889 oparms->fid->access = oparms->desired_access;
Steve Frenchdfe33f92018-10-30 19:50:31 -05002890#ifdef CONFIG_CIFS_DEBUG2
2891 oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
2892#endif /* CIFS_DEBUG2 */
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07002893
2894 if (buf) {
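		/*
		 * The 32 bytes copied here are the four 8-byte timestamps
		 * (CreationTime through ChangeTime); the remaining fields
		 * are copied individually below.
		 */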
2895 memcpy(buf, &rsp->CreationTime, 32);
2896 buf->AllocationSize = rsp->AllocationSize;
2897 buf->EndOfFile = rsp->EndofFile;
2898 buf->Attributes = rsp->FileAttributes;
2899 buf->NumberOfLinks = cpu_to_le32(1);
2900 buf->DeletePending = 0;
2901 }
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002902
Steve French89a5bfa2019-07-18 17:22:18 -05002903
2904 smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002905 oparms->fid->lease_key, oplock, buf, posix);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002906creat_exit:
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002907 SMB2_open_free(&rqst);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002908 free_rsp_buf(resp_buftype, rsp);
2909 return rc;
2910}
2911
Steve French4a72daf2013-06-25 00:20:49 -05002912int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002913SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2914 struct smb_rqst *rqst,
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002915 u64 persistent_fid, u64 volatile_fid, u32 opcode,
Steve French153322f2019-03-28 22:32:49 -05002916 bool is_fsctl, char *in_data, u32 indatalen,
2917 __u32 max_response_size)
Steve French4a72daf2013-06-25 00:20:49 -05002918{
2919 struct smb2_ioctl_req *req;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002920 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlberg97754682017-11-09 12:14:20 +11002921 unsigned int total_len;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002922 int rc;
Long Li2c87d6a2019-05-15 14:09:05 -07002923 char *in_data_buf;
Steve French4a72daf2013-06-25 00:20:49 -05002924
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002925 rc = smb2_ioctl_req_init(opcode, tcon, server,
2926 (void **) &req, &total_len);
Steve French4a72daf2013-06-25 00:20:49 -05002927 if (rc)
2928 return rc;
2929
Long Li2c87d6a2019-05-15 14:09:05 -07002930 if (indatalen) {
2931 /*
2932		 * indatalen is usually small (a couple of bytes at most), so
2933		 * just allocate it through the generic pool
2934 */
YueHaibingd81f0972019-06-01 03:31:10 +00002935 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
Long Li2c87d6a2019-05-15 14:09:05 -07002936 if (!in_data_buf) {
2937 cifs_small_buf_release(req);
2938 return -ENOMEM;
2939 }
Long Li2c87d6a2019-05-15 14:09:05 -07002940 }
2941
Steve French4a72daf2013-06-25 00:20:49 -05002942 req->CtlCode = cpu_to_le32(opcode);
2943 req->PersistentFileId = persistent_fid;
2944 req->VolatileFileId = volatile_fid;
2945
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002946 iov[0].iov_base = (char *)req;
2947 /*
2948	 * If there is no input data, the size of the ioctl struct in the
2949	 * protocol spec still includes a 1 byte data buffer,
2950	 * but if input data is passed to the ioctl, we do not
2951	 * want to count that byte twice, so we do not send
2952	 * the dummy one byte of data in iovec[0] when sending
2953	 * input data (in iovec[1]).
2954 */
Steve French4a72daf2013-06-25 00:20:49 -05002955 if (indatalen) {
2956 req->InputCount = cpu_to_le32(indatalen);
2957 /* do not set InputOffset if no input data */
2958 req->InputOffset =
Ronnie Sahlberg97754682017-11-09 12:14:20 +11002959 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002960 rqst->rq_nvec = 2;
2961 iov[0].iov_len = total_len - 1;
Long Li2c87d6a2019-05-15 14:09:05 -07002962 iov[1].iov_base = in_data_buf;
Steve French4a72daf2013-06-25 00:20:49 -05002963 iov[1].iov_len = indatalen;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002964 } else {
2965 rqst->rq_nvec = 1;
2966 iov[0].iov_len = total_len;
2967 }
Steve French4a72daf2013-06-25 00:20:49 -05002968
2969 req->OutputOffset = 0;
2970 req->OutputCount = 0; /* MBZ */
2971
2972 /*
Steve French153322f2019-03-28 22:32:49 -05002973 * In most cases max_response_size is set to 16K (CIFSMaxBufSize)
2974	 * We could increase the default MaxOutputResponse, but that could require
2975	 * more credits. Windows typically sets this smaller, but for some
Steve French4a72daf2013-06-25 00:20:49 -05002976	 * ioctls it may be useful to allow the server to send more. No point
2977	 * limiting what the server can send as long as it fits in one credit.
Steve French153322f2019-03-28 22:32:49 -05002978	 * We cannot handle more than CIFS_MAX_BUF_SIZE yet but may want
2979	 * to increase this limit in the future.
2980 * Note that for snapshot queries that servers like Azure expect that
2981 * the first query be minimal size (and just used to get the number/size
2982 * of previous versions) so response size must be specified as EXACTLY
2983 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
2984 * of eight bytes. Currently that is the only case where we set max
2985 * response size smaller.
Steve French4a72daf2013-06-25 00:20:49 -05002986 */
Steve French153322f2019-03-28 22:32:49 -05002987 req->MaxOutputResponse = cpu_to_le32(max_response_size);
Namjae Jeonebf57442020-06-11 11:21:19 +09002988 req->sync_hdr.CreditCharge =
2989 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
2990 SMB2_MAX_BUFFER_SIZE));
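	/*
	 * e.g. with the default 16K (CIFSMaxBufSize) response cap this works
	 * out to a CreditCharge of 1, while a 1M payload would need
	 * DIV_ROUND_UP(1048576, 65536) = 16 credits (assuming
	 * SMB2_MAX_BUFFER_SIZE is 64K).
	 */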
Steve French4a72daf2013-06-25 00:20:49 -05002991 if (is_fsctl)
2992 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
2993 else
2994 req->Flags = 0;
2995
Steve French4587eee2017-10-25 15:58:31 -05002996 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
2997 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
Ronnie Sahlberg97754682017-11-09 12:14:20 +11002998 req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
Steve French4a72daf2013-06-25 00:20:49 -05002999
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003000 return 0;
3001}
3002
3003void
3004SMB2_ioctl_free(struct smb_rqst *rqst)
3005{
Murphy Zhou6457c202019-05-23 12:12:43 +08003006 int i;
Long Li2c87d6a2019-05-15 14:09:05 -07003007 if (rqst && rqst->rq_iov) {
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003008 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Murphy Zhou6457c202019-05-23 12:12:43 +08003009 for (i = 1; i < rqst->rq_nvec; i++)
3010 if (rqst->rq_iov[i].iov_base != smb2_padding)
3011 kfree(rqst->rq_iov[i].iov_base);
Long Li2c87d6a2019-05-15 14:09:05 -07003012 }
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003013}
3014
Steve French153322f2019-03-28 22:32:49 -05003015
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003016/*
3017 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
3018 */
3019int
3020SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3021 u64 volatile_fid, u32 opcode, bool is_fsctl,
Steve French153322f2019-03-28 22:32:49 -05003022 char *in_data, u32 indatalen, u32 max_out_data_len,
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003023 char **out_data, u32 *plen /* returned data len */)
3024{
3025 struct smb_rqst rqst;
3026 struct smb2_ioctl_rsp *rsp = NULL;
3027 struct cifs_ses *ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003028 struct TCP_Server_Info *server;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003029 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003030 struct kvec rsp_iov = {NULL, 0};
3031 int resp_buftype = CIFS_NO_BUFFER;
3032 int rc = 0;
3033 int flags = 0;
3034
3035 cifs_dbg(FYI, "SMB2 IOCTL\n");
3036
3037 if (out_data != NULL)
3038 *out_data = NULL;
3039
3040 /* zero out returned data len, in case of error */
3041 if (plen)
3042 *plen = 0;
3043
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003044 if (!tcon)
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003045 return -EIO;
3046
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003047 ses = tcon->ses;
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003048 if (!ses)
3049 return -EIO;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003050
3051 server = cifs_pick_channel(ses);
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003052 if (!server)
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003053 return -EIO;
3054
3055 if (smb3_encryption_required(tcon))
3056 flags |= CIFS_TRANSFORM_REQ;
3057
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003058 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003059 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003060 rqst.rq_iov = iov;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003061 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003062
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003063 rc = SMB2_ioctl_init(tcon, server,
3064 &rqst, persistent_fid, volatile_fid, opcode,
Steve French153322f2019-03-28 22:32:49 -05003065 is_fsctl, in_data, indatalen, max_out_data_len);
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003066 if (rc)
3067 goto ioctl_exit;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003068
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003069 rc = cifs_send_recv(xid, ses, server,
3070 &rqst, &resp_buftype, flags,
Ronnie Sahlberg97754682017-11-09 12:14:20 +11003071 &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003072 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
Steve French4a72daf2013-06-25 00:20:49 -05003073
Steve Frencheccb4422018-05-17 21:16:55 -05003074 if (rc != 0)
3075 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3076 ses->Suid, 0, opcode, rc);
3077
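	/*
	 * -EINVAL and -E2BIG are not always hard failures here: a copychunk
	 * request that exceeds the server's limits fails with
	 * STATUS_INVALID_PARAMETER yet still carries the server's preferred
	 * chunk sizes in the output buffer, and QUERY_ALLOCATED_RANGES may
	 * return a truncated range list when the output buffer is too small,
	 * so those responses are still parsed below.
	 */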
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003078 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
Steve French8e353102015-03-26 19:47:02 -05003079 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
Steve French4a72daf2013-06-25 00:20:49 -05003080 goto ioctl_exit;
Steve French9bf0c9c2013-11-16 18:05:28 -06003081 } else if (rc == -EINVAL) {
3082 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3083 (opcode != FSCTL_SRV_COPYCHUNK)) {
Steve French8e353102015-03-26 19:47:02 -05003084 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
Steve French9bf0c9c2013-11-16 18:05:28 -06003085 goto ioctl_exit;
3086 }
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003087 } else if (rc == -E2BIG) {
3088 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3089 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3090 goto ioctl_exit;
3091 }
Steve French4a72daf2013-06-25 00:20:49 -05003092 }
3093
3094 /* check if caller wants to look at return data or just return rc */
3095 if ((plen == NULL) || (out_data == NULL))
3096 goto ioctl_exit;
3097
3098 *plen = le32_to_cpu(rsp->OutputCount);
3099
3100 /* We check for obvious errors in the output buffer length and offset */
3101 if (*plen == 0)
3102 goto ioctl_exit; /* server returned no data */
Dan Carpenter2d204ee2018-09-10 14:12:07 +03003103 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003104 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
Steve French4a72daf2013-06-25 00:20:49 -05003105 *plen = 0;
3106 rc = -EIO;
3107 goto ioctl_exit;
3108 }
3109
Dan Carpenter2d204ee2018-09-10 14:12:07 +03003110 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003111 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
Steve French4a72daf2013-06-25 00:20:49 -05003112 le32_to_cpu(rsp->OutputOffset));
3113 *plen = 0;
3114 rc = -EIO;
3115 goto ioctl_exit;
3116 }
3117
YueHaibingd034fee2018-09-10 01:33:06 +00003118 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3119 *plen, GFP_KERNEL);
Steve French4a72daf2013-06-25 00:20:49 -05003120 if (*out_data == NULL) {
3121 rc = -ENOMEM;
3122 goto ioctl_exit;
3123 }
3124
Steve French4a72daf2013-06-25 00:20:49 -05003125ioctl_exit:
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003126 SMB2_ioctl_free(&rqst);
Steve French4a72daf2013-06-25 00:20:49 -05003127 free_rsp_buf(resp_buftype, rsp);
3128 return rc;
3129}
3130
Steve French64a5cfa2013-10-14 15:31:32 -05003131/*
3132 * Individual callers to ioctl worker function follow
3133 */
3134
3135int
3136SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3137 u64 persistent_fid, u64 volatile_fid)
3138{
3139 int rc;
Steve French64a5cfa2013-10-14 15:31:32 -05003140 struct compress_ioctl fsctl_input;
3141 char *ret_data = NULL;
3142
3143 fsctl_input.CompressionState =
Fabian Frederickbc09d142014-12-10 15:41:15 -08003144 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
Steve French64a5cfa2013-10-14 15:31:32 -05003145
3146 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3147 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
3148 (char *)&fsctl_input /* data input */,
Steve French153322f2019-03-28 22:32:49 -05003149 2 /* in data len */, CIFSMaxBufSize /* max out data */,
3150 &ret_data /* out data */, NULL);
Steve French64a5cfa2013-10-14 15:31:32 -05003151
3152 cifs_dbg(FYI, "set compression rc %d\n", rc);
Steve French64a5cfa2013-10-14 15:31:32 -05003153
3154 return rc;
3155}
3156
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003157int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003158SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3159 struct smb_rqst *rqst,
Steve French43f8a6a2019-12-02 21:46:54 -06003160 u64 persistent_fid, u64 volatile_fid, bool query_attrs)
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003161{
3162 struct smb2_close_req *req;
3163 struct kvec *iov = rqst->rq_iov;
3164 unsigned int total_len;
3165 int rc;
3166
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003167 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3168 (void **) &req, &total_len);
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003169 if (rc)
3170 return rc;
3171
3172 req->PersistentFileId = persistent_fid;
3173 req->VolatileFileId = volatile_fid;
Steve French43f8a6a2019-12-02 21:46:54 -06003174 if (query_attrs)
3175 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3176 else
3177 req->Flags = 0;
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003178 iov[0].iov_base = (char *)req;
3179 iov[0].iov_len = total_len;
3180
3181 return 0;
3182}
3183
3184void
3185SMB2_close_free(struct smb_rqst *rqst)
3186{
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10003187 if (rqst && rqst->rq_iov)
3188 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003189}
3190
3191int
Steve French43f8a6a2019-12-02 21:46:54 -06003192__SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3193 u64 persistent_fid, u64 volatile_fid,
3194 struct smb2_file_network_open_info *pbuf)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003195{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003196 struct smb_rqst rqst;
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003197 struct smb2_close_rsp *rsp = NULL;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003198 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003199 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003200 struct kvec iov[1];
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003201 struct kvec rsp_iov;
Garry McNultyef2298a2018-10-03 20:51:21 +01003202 int resp_buftype = CIFS_NO_BUFFER;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003203 int rc = 0;
Steve French9e8fae22019-12-02 17:55:41 -06003204 int flags = 0;
Steve French43f8a6a2019-12-02 21:46:54 -06003205 bool query_attrs = false;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003206
Joe Perchesf96637b2013-05-04 22:12:25 -05003207 cifs_dbg(FYI, "Close\n");
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003208
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003209 if (!ses || !server)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003210 return -EIO;
3211
Steve French5a77e752018-05-09 17:43:08 -05003212 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07003213 flags |= CIFS_TRANSFORM_REQ;
3214
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003215 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003216 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003217 rqst.rq_iov = iov;
3218 rqst.rq_nvec = 1;
3219
Steve French43f8a6a2019-12-02 21:46:54 -06003220	/* check if we need to ask the server to return timestamps in the close response */
3221 if (pbuf)
3222 query_attrs = true;
3223
Steve Frenchf90f9792019-09-03 18:35:42 -05003224 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003225 rc = SMB2_close_init(tcon, server,
3226 &rqst, persistent_fid, volatile_fid,
Steve French43f8a6a2019-12-02 21:46:54 -06003227 query_attrs);
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003228 if (rc)
3229 goto close_exit;
3230
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003231 rc = cifs_send_recv(xid, ses, server,
3232 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003233 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003234
3235 if (rc != 0) {
Namjae Jeond4a029d2014-08-20 19:39:59 +09003236 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003237 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3238 rc);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003239 goto close_exit;
Steve French43f8a6a2019-12-02 21:46:54 -06003240 } else {
Steve Frenchf90f9792019-09-03 18:35:42 -05003241 trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3242 ses->Suid);
Steve French43f8a6a2019-12-02 21:46:54 -06003243 /*
3244	 * Note that we have to subtract 4 since struct network_open_info
3245	 * has a final 4 byte pad that the close response does not have
3246 */
3247 if (pbuf)
3248 memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
3249 }
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003250
Steve Frenchfae80442018-10-19 17:14:32 -05003251 atomic_dec(&tcon->num_remote_opens);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003252close_exit:
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003253 SMB2_close_free(&rqst);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003254 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003255
3256 /* retry close in a worker thread if this one is interrupted */
Paulo Alcantara2659d3b2021-01-13 14:16:16 -03003257 if (is_interrupt_error(rc)) {
Steve French9e8fae22019-12-02 17:55:41 -06003258 int tmp_rc;
3259
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003260 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3261 volatile_fid);
3262 if (tmp_rc)
3263 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3264 persistent_fid, tmp_rc);
3265 }
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003266 return rc;
Ronnie Sahlberg97ca1762018-04-26 08:50:49 -06003267}
3268
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003269int
Steve French43f8a6a2019-12-02 21:46:54 -06003270SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3271 u64 persistent_fid, u64 volatile_fid)
3272{
3273 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3274}
3275
3276int
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003277smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3278 struct kvec *iov, unsigned int min_buf_size)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003279{
Ronnie Sahlbergc1596ff2018-04-09 18:06:30 +10003280 unsigned int smb_len = iov->iov_len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003281 char *end_of_smb = smb_len + (char *)iov->iov_base;
3282 char *begin_of_buf = offset + (char *)iov->iov_base;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003283 char *end_of_buf = begin_of_buf + buffer_length;
3284
3285
3286 if (buffer_length < min_buf_size) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003287 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3288 buffer_length, min_buf_size);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003289 return -EINVAL;
3290 }
3291
3292 /* check if beyond RFC1001 maximum length */
3293 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003294 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3295 buffer_length, smb_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003296 return -EINVAL;
3297 }
3298
3299 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07003300 cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003301 return -EINVAL;
3302 }
3303
3304 return 0;
3305}
3306
3307/*
3308 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3309 * Caller must free buffer.
3310 */
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10003311int
3312smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3313 struct kvec *iov, unsigned int minbufsize,
3314 char *data)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003315{
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003316 char *begin_of_buf = offset + (char *)iov->iov_base;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003317 int rc;
3318
3319 if (!data)
3320 return -EINVAL;
3321
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003322 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003323 if (rc)
3324 return rc;
3325
3326 memcpy(data, begin_of_buf, buffer_length);
3327
3328 return 0;
3329}
3330
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003331int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003332SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3333 struct smb_rqst *rqst,
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003334 u64 persistent_fid, u64 volatile_fid,
3335 u8 info_class, u8 info_type, u32 additional_info,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003336 size_t output_len, size_t input_len, void *input)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003337{
3338 struct smb2_query_info_req *req;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003339 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11003340 unsigned int total_len;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003341 int rc;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003342
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003343 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3344 (void **) &req, &total_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003345 if (rc)
3346 return rc;
3347
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003348 req->InfoType = info_type;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003349 req->FileInfoClass = info_class;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003350 req->PersistentFileId = persistent_fid;
3351 req->VolatileFileId = volatile_fid;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003352 req->AdditionalInformation = cpu_to_le32(additional_info);
Aurelien Aptel48923d22017-10-17 14:47:17 +02003353
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003354 req->OutputBufferLength = cpu_to_le32(output_len);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003355 if (input_len) {
3356 req->InputBufferLength = cpu_to_le32(input_len);
3357 /* total_len for smb query request never close to le16 max */
3358		/* total_len for an smb query request never comes close to the le16 max */
3359 memcpy(req->Buffer, input, input_len);
3360 }
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003361
3362 iov[0].iov_base = (char *)req;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11003363 /* 1 for Buffer */
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003364 iov[0].iov_len = total_len - 1 + input_len;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003365 return 0;
3366}
3367
3368void
3369SMB2_query_info_free(struct smb_rqst *rqst)
3370{
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10003371 if (rqst && rqst->rq_iov)
3372 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003373}
3374
3375static int
3376query_info(const unsigned int xid, struct cifs_tcon *tcon,
3377 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3378 u32 additional_info, size_t output_len, size_t min_len, void **data,
3379 u32 *dlen)
3380{
3381 struct smb_rqst rqst;
3382 struct smb2_query_info_rsp *rsp = NULL;
3383 struct kvec iov[1];
3384 struct kvec rsp_iov;
3385 int rc = 0;
Garry McNultyef2298a2018-10-03 20:51:21 +01003386 int resp_buftype = CIFS_NO_BUFFER;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003387 struct cifs_ses *ses = tcon->ses;
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003388 struct TCP_Server_Info *server;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003389 int flags = 0;
Colin Ian King73aaf922019-01-16 16:28:59 +00003390 bool allocated = false;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003391
3392 cifs_dbg(FYI, "Query Info\n");
3393
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003394 if (!ses)
3395 return -EIO;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003396 server = cifs_pick_channel(ses);
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003397 if (!server)
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003398 return -EIO;
3399
3400 if (smb3_encryption_required(tcon))
3401 flags |= CIFS_TRANSFORM_REQ;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003402
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003403 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003404 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003405 rqst.rq_iov = iov;
3406 rqst.rq_nvec = 1;
3407
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003408 rc = SMB2_query_info_init(tcon, server,
3409 &rqst, persistent_fid, volatile_fid,
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003410 info_class, info_type, additional_info,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003411 output_len, 0, NULL);
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003412 if (rc)
3413 goto qinf_exit;
3414
Steve Frenchd42043a2019-02-26 21:58:30 -06003415 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3416 ses->Suid, info_class, (__u32)info_type);
3417
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003418 rc = cifs_send_recv(xid, ses, server,
3419 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003420 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovskye5d04882012-09-19 16:03:26 +04003421
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003422 if (rc) {
3423 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003424 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3425 ses->Suid, info_class, (__u32)info_type, rc);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003426 goto qinf_exit;
3427 }
3428
Steve Frenchd42043a2019-02-26 21:58:30 -06003429 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3430 ses->Suid, info_class, (__u32)info_type);
3431
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003432 if (dlen) {
3433 *dlen = le32_to_cpu(rsp->OutputBufferLength);
3434 if (!*data) {
3435 *data = kmalloc(*dlen, GFP_KERNEL);
3436 if (!*data) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003437 cifs_tcon_dbg(VFS,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003438 "Error %d allocating memory for acl\n",
3439 rc);
3440 *dlen = 0;
Colin Ian King73aaf922019-01-16 16:28:59 +00003441 rc = -ENOMEM;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003442 goto qinf_exit;
3443 }
Colin Ian King73aaf922019-01-16 16:28:59 +00003444 allocated = true;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003445 }
3446 }
3447
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10003448 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3449 le32_to_cpu(rsp->OutputBufferLength),
3450 &rsp_iov, min_len, *data);
Colin Ian King73aaf922019-01-16 16:28:59 +00003451 if (rc && allocated) {
3452 kfree(*data);
3453 *data = NULL;
3454 *dlen = 0;
3455 }
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003456
3457qinf_exit:
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003458 SMB2_query_info_free(&rqst);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003459 free_rsp_buf(resp_buftype, rsp);
3460 return rc;
3461}
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003462
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003463int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3464 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003465{
3466 return query_info(xid, tcon, persistent_fid, volatile_fid,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003467 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04003468 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003469 sizeof(struct smb2_file_all_info), (void **)&data,
3470 NULL);
3471}
3472
3473int
Steve Frenchb1bc1872020-06-11 20:23:38 -05003474SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3475 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
3476{
3477	size_t output_len = sizeof(struct smb311_posix_qinfo) +
3478 (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
3479 *plen = 0;
3480
3481 return query_info(xid, tcon, persistent_fid, volatile_fid,
3482 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
3483 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
3484}
3485
3486int
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003487SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003488 u64 persistent_fid, u64 volatile_fid,
Boris Protopopov9541b812020-12-17 20:58:08 +00003489 void **data, u32 *plen, u32 extra_info)
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003490{
Boris Protopopov9541b812020-12-17 20:58:08 +00003491 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
3492 extra_info;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003493 *plen = 0;
3494
3495 return query_info(xid, tcon, persistent_fid, volatile_fid,
3496 0, SMB2_O_INFO_SECURITY, additional_info,
Shirish Pargaonkaree25c6d2018-06-04 06:46:22 -05003497 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003498}
3499
3500int
3501SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
3502 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
3503{
3504 return query_info(xid, tcon, persistent_fid, volatile_fid,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003505 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003506 sizeof(struct smb2_file_internal_info),
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003507 sizeof(struct smb2_file_internal_info),
3508 (void **)&uniqueid, NULL);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003509}
3510
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003511/*
Steve Frenchc3498182019-09-15 22:38:52 -05003512 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
3513 * See MS-SMB2 2.2.35 and 2.2.36
3514 */
3515
zhengbin388962e2019-09-23 15:06:18 +08003516static int
Steve Frenchc3498182019-09-15 22:38:52 -05003517SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003518 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3519 u64 persistent_fid, u64 volatile_fid,
3520 u32 completion_filter, bool watch_tree)
Steve Frenchc3498182019-09-15 22:38:52 -05003521{
3522 struct smb2_change_notify_req *req;
3523 struct kvec *iov = rqst->rq_iov;
3524 unsigned int total_len;
3525 int rc;
3526
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003527 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
3528 (void **) &req, &total_len);
Steve Frenchc3498182019-09-15 22:38:52 -05003529 if (rc)
3530 return rc;
3531
3532 req->PersistentFileId = persistent_fid;
3533 req->VolatileFileId = volatile_fid;
Steve Frenchd26c2dd2020-02-06 06:00:14 -06003534 /* See note 354 of MS-SMB2, 64K max */
Steve French52870d52019-10-01 21:25:46 -05003535 req->OutputBufferLength =
3536 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
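	/*
	 * i.e. the largest notify payload that, together with the SMB2
	 * header, still fits within the 64K maximum noted above.
	 */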
Steve Frenchc3498182019-09-15 22:38:52 -05003537 req->CompletionFilter = cpu_to_le32(completion_filter);
3538 if (watch_tree)
3539 req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
3540 else
3541 req->Flags = 0;
3542
3543 iov[0].iov_base = (char *)req;
3544 iov[0].iov_len = total_len;
3545
3546 return 0;
3547}
3548
3549int
3550SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
3551 u64 persistent_fid, u64 volatile_fid, bool watch_tree,
3552 u32 completion_filter)
3553{
3554 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003555 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve Frenchc3498182019-09-15 22:38:52 -05003556 struct smb_rqst rqst;
3557 struct kvec iov[1];
3558 struct kvec rsp_iov = {NULL, 0};
3559 int resp_buftype = CIFS_NO_BUFFER;
3560 int flags = 0;
3561 int rc = 0;
3562
3563 cifs_dbg(FYI, "change notify\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003564 if (!ses || !server)
Steve Frenchc3498182019-09-15 22:38:52 -05003565 return -EIO;
3566
3567 if (smb3_encryption_required(tcon))
3568 flags |= CIFS_TRANSFORM_REQ;
3569
3570 memset(&rqst, 0, sizeof(struct smb_rqst));
3571 memset(&iov, 0, sizeof(iov));
3572 rqst.rq_iov = iov;
3573 rqst.rq_nvec = 1;
3574
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003575 rc = SMB2_notify_init(xid, &rqst, tcon, server,
3576 persistent_fid, volatile_fid,
Steve Frenchc3498182019-09-15 22:38:52 -05003577 completion_filter, watch_tree);
3578 if (rc)
3579 goto cnotify_exit;
3580
3581 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
3582 (u8)watch_tree, completion_filter);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003583 rc = cifs_send_recv(xid, ses, server,
3584 &rqst, &resp_buftype, flags, &rsp_iov);
Steve Frenchc3498182019-09-15 22:38:52 -05003585
3586 if (rc != 0) {
3587 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
3588 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
3589 (u8)watch_tree, completion_filter, rc);
3590 } else
3591 trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
3592 ses->Suid, (u8)watch_tree, completion_filter);
3593
3594 cnotify_exit:
3595 if (rqst.rq_iov)
3596 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
3597 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3598 return rc;
3599}
3600
3601
3602
3603/*
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003604 * This is a no-op for now. We're not really interested in the reply, but
3605 * rather in the fact that the server sent one and that server->lstrp
3606 * gets updated.
3607 *
3608 * FIXME: maybe we should consider checking that the reply matches request?
3609 */
3610static void
3611smb2_echo_callback(struct mid_q_entry *mid)
3612{
3613 struct TCP_Server_Info *server = mid->callback_data;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003614 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003615 struct cifs_credits credits = { .value = 0, .instance = 0 };
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003616
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08003617 if (mid->mid_state == MID_RESPONSE_RECEIVED
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003618 || mid->mid_state == MID_RESPONSE_MALFORMED) {
3619 credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
3620 credits.instance = server->reconnect_instance;
3621 }
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003622
3623 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003624 add_credits(server, &credits, CIFS_ECHO_OP);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003625}
3626
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003627void smb2_reconnect_server(struct work_struct *work)
3628{
3629 struct TCP_Server_Info *server = container_of(work,
3630 struct TCP_Server_Info, reconnect.work);
3631 struct cifs_ses *ses;
3632 struct cifs_tcon *tcon, *tcon2;
3633 struct list_head tmp_list;
3634 int tcon_exist = false;
Germano Percossi18ea4312017-04-07 12:29:36 +01003635 int rc;
3636 int resched = false;
3637
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003638
3639 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
3640 mutex_lock(&server->reconnect_mutex);
3641
3642 INIT_LIST_HEAD(&tmp_list);
3643 cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
3644
3645 spin_lock(&cifs_tcp_ses_lock);
3646 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3647 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
Pavel Shilovsky96a988f2016-11-29 11:31:23 -08003648 if (tcon->need_reconnect || tcon->need_reopen_files) {
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003649 tcon->tc_count++;
3650 list_add_tail(&tcon->rlist, &tmp_list);
3651 tcon_exist = true;
3652 }
3653 }
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003654 /*
3655 * IPC has the same lifetime as its session and uses its
3656 * refcount.
3657 */
Aurelien Aptelb327a712018-01-24 13:46:10 +01003658 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
3659 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
3660 tcon_exist = true;
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003661 ses->ses_count++;
Aurelien Aptelb327a712018-01-24 13:46:10 +01003662 }
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003663 }
3664 /*
3665 * Get the reference to server struct to be sure that the last call of
3666 * cifs_put_tcon() in the loop below won't release the server pointer.
3667 */
3668 if (tcon_exist)
3669 server->srv_count++;
3670
3671 spin_unlock(&cifs_tcp_ses_lock);
3672
3673 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003674 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
Germano Percossi18ea4312017-04-07 12:29:36 +01003675 if (!rc)
Pavel Shilovsky96a988f2016-11-29 11:31:23 -08003676 cifs_reopen_persistent_handles(tcon);
Germano Percossi18ea4312017-04-07 12:29:36 +01003677 else
3678 resched = true;
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003679 list_del_init(&tcon->rlist);
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003680 if (tcon->ipc)
3681 cifs_put_smb_ses(tcon->ses);
3682 else
3683 cifs_put_tcon(tcon);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003684 }
3685
3686 cifs_dbg(FYI, "Reconnecting tcons finished\n");
Germano Percossi18ea4312017-04-07 12:29:36 +01003687 if (resched)
3688 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003689 mutex_unlock(&server->reconnect_mutex);
3690
3691 /* now we can safely release srv struct */
3692 if (tcon_exist)
3693 cifs_put_tcp_session(server, 1);
3694}
3695
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003696int
3697SMB2_echo(struct TCP_Server_Info *server)
3698{
3699 struct smb2_echo_req *req;
3700 int rc = 0;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003701 struct kvec iov[1];
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003702 struct smb_rqst rqst = { .rq_iov = iov,
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003703 .rq_nvec = 1 };
Ronnie Sahlberg7f7ae752017-11-09 12:14:21 +11003704 unsigned int total_len;
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003705
Joe Perchesf96637b2013-05-04 22:12:25 -05003706 cifs_dbg(FYI, "In echo request\n");
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003707
Steve French4fcd1812016-06-22 20:12:05 -05003708 if (server->tcpStatus == CifsNeedNegotiate) {
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003709 /* No need to send echo on newly established connections */
Stefan Metzmacherb08484d2020-02-24 14:14:59 +01003710 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003711 return rc;
Steve French4fcd1812016-06-22 20:12:05 -05003712 }
3713
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003714 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
3715 (void **)&req, &total_len);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003716 if (rc)
3717 return rc;
3718
Ronnie Sahlberg7f7ae752017-11-09 12:14:21 +11003719 req->sync_hdr.CreditRequest = cpu_to_le16(1);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003720
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003721 iov[0].iov_len = total_len;
3722 iov[0].iov_base = (char *)req;
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003723
Pavel Shilovsky9b7c18a2016-11-16 14:06:17 -08003724 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08003725 server, CIFS_ECHO_OP, NULL);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003726 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003727 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003728
3729 cifs_small_buf_release(req);
3730 return rc;
3731}
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003732
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003733void
3734SMB2_flush_free(struct smb_rqst *rqst)
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003735{
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003736 if (rqst && rqst->rq_iov)
3737 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3738}
3739
3740int
3741SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003742 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3743 u64 persistent_fid, u64 volatile_fid)
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003744{
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003745 struct smb2_flush_req *req;
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003746 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlberg1f444e42017-11-20 11:24:39 +11003747 unsigned int total_len;
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003748 int rc;
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003749
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003750 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
3751 (void **) &req, &total_len);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003752 if (rc)
3753 return rc;
3754
3755 req->PersistentFileId = persistent_fid;
3756 req->VolatileFileId = volatile_fid;
3757
3758 iov[0].iov_base = (char *)req;
Ronnie Sahlberg1f444e42017-11-20 11:24:39 +11003759 iov[0].iov_len = total_len;
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003760
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003761 return 0;
3762}
3763
3764int
3765SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3766 u64 volatile_fid)
3767{
3768 struct cifs_ses *ses = tcon->ses;
3769 struct smb_rqst rqst;
3770 struct kvec iov[1];
3771 struct kvec rsp_iov = {NULL, 0};
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003772 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003773 int resp_buftype = CIFS_NO_BUFFER;
3774 int flags = 0;
3775 int rc = 0;
3776
3777 cifs_dbg(FYI, "flush\n");
3778 if (!ses || !(ses->server))
3779 return -EIO;
3780
3781 if (smb3_encryption_required(tcon))
3782 flags |= CIFS_TRANSFORM_REQ;
3783
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003784 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003785 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003786 rqst.rq_iov = iov;
3787 rqst.rq_nvec = 1;
3788
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003789 rc = SMB2_flush_init(xid, &rqst, tcon, server,
3790 persistent_fid, volatile_fid);
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003791 if (rc)
3792 goto flush_exit;
3793
Steve Frenchf90f9792019-09-03 18:35:42 -05003794 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003795 rc = cifs_send_recv(xid, ses, server,
3796 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003797
Steve Frencheccb4422018-05-17 21:16:55 -05003798 if (rc != 0) {
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003799 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003800 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
3801 rc);
Steve Frenchf90f9792019-09-03 18:35:42 -05003802 } else
3803 trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
3804 ses->Suid);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003805
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003806 flush_exit:
3807 SMB2_flush_free(&rqst);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003808 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003809 return rc;
3810}
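
/*
 * Minimal illustrative sketch (not upstream code), assuming xid, tcon and
 * fid come from a file opened earlier on this tcon: flushing that handle
 * with SMB2_flush() above. Failures are counted and traced inside
 * SMB2_flush() itself.
 */
static int __maybe_unused example_flush_handle(const unsigned int xid,
					       struct cifs_tcon *tcon,
					       struct cifs_fid *fid)
{
	return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}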
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003811
3812/*
3813 * To form a chain of read requests, any read requests after the first should
3814 * have the end_of_chain boolean set to true.
3815 */
3816static int
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003817smb2_new_read_req(void **buf, unsigned int *total_len,
Long Li2dabfd52017-11-07 01:54:53 -07003818 struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
3819 unsigned int remaining_bytes, int request_type)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003820{
3821 int rc = -EACCES;
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08003822 struct smb2_read_plain_req *req = NULL;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003823 struct smb2_sync_hdr *shdr;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003824 struct TCP_Server_Info *server = io_parms->server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003825
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003826 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
3827 (void **) &req, total_len);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003828 if (rc)
3829 return rc;
Long Li2dabfd52017-11-07 01:54:53 -07003830
Long Li2dabfd52017-11-07 01:54:53 -07003831 if (server == NULL)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003832 return -ECONNABORTED;
3833
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08003834 shdr = &req->sync_hdr;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003835 shdr->ProcessId = cpu_to_le32(io_parms->pid);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003836
3837 req->PersistentFileId = io_parms->persistent_fid;
3838 req->VolatileFileId = io_parms->volatile_fid;
3839 req->ReadChannelInfoOffset = 0; /* reserved */
3840 req->ReadChannelInfoLength = 0; /* reserved */
3841 req->Channel = 0; /* reserved */
3842 req->MinimumCount = 0;
3843 req->Length = cpu_to_le32(io_parms->length);
3844 req->Offset = cpu_to_le64(io_parms->offset);
Steve Frenchd323c2462019-02-25 00:52:43 -06003845
3846 trace_smb3_read_enter(0 /* xid */,
3847 io_parms->persistent_fid,
3848 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
3849 io_parms->offset, io_parms->length);
Long Libd3dcc62017-11-22 17:38:47 -07003850#ifdef CONFIG_CIFS_SMB_DIRECT
3851 /*
3852 * If we want to do an RDMA write, fill in and append
3853 * smbd_buffer_descriptor_v1 to the end of the read request
3854 */
Long Libb4c0412018-04-17 12:17:08 -07003855 if (server->rdma && rdata && !server->sign &&
Long Libd3dcc62017-11-22 17:38:47 -07003856 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003857
Long Libd3dcc62017-11-22 17:38:47 -07003858 struct smbd_buffer_descriptor_v1 *v1;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003859 bool need_invalidate = server->dialect == SMB30_PROT_ID;
Long Libd3dcc62017-11-22 17:38:47 -07003860
3861 rdata->mr = smbd_register_mr(
3862 server->smbd_conn, rdata->pages,
Long Li7cf20bc2018-05-30 12:48:02 -07003863 rdata->nr_pages, rdata->page_offset,
3864 rdata->tailsz, true, need_invalidate);
Long Libd3dcc62017-11-22 17:38:47 -07003865 if (!rdata->mr)
Long Lib7972092019-04-05 21:36:34 +00003866 return -EAGAIN;
Long Libd3dcc62017-11-22 17:38:47 -07003867
3868 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
3869 if (need_invalidate)
3870 req->Channel = SMB2_CHANNEL_RDMA_V1;
3871 req->ReadChannelInfoOffset =
Steve French2026b062018-01-24 23:07:41 -06003872 cpu_to_le16(offsetof(struct smb2_read_plain_req, Buffer));
Long Libd3dcc62017-11-22 17:38:47 -07003873 req->ReadChannelInfoLength =
Steve French2026b062018-01-24 23:07:41 -06003874 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
Long Libd3dcc62017-11-22 17:38:47 -07003875 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
Steve French2026b062018-01-24 23:07:41 -06003876 v1->offset = cpu_to_le64(rdata->mr->mr->iova);
3877 v1->token = cpu_to_le32(rdata->mr->mr->rkey);
3878 v1->length = cpu_to_le32(rdata->mr->mr->length);
Long Libd3dcc62017-11-22 17:38:47 -07003879
3880 *total_len += sizeof(*v1) - 1;
3881 }
3882#endif
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003883 if (request_type & CHAINED_REQUEST) {
3884 if (!(request_type & END_OF_CHAIN)) {
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08003885 /* next 8-byte aligned request */
3886 *total_len = DIV_ROUND_UP(*total_len, 8) * 8;
3887 shdr->NextCommand = cpu_to_le32(*total_len);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003888 } else /* END_OF_CHAIN */
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003889 shdr->NextCommand = 0;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003890 if (request_type & RELATED_REQUEST) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003891 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003892 /*
3893 * Related requests use info from previous read request
3894 * in chain.
3895 */
Steve Frenchc0d46712021-05-15 09:52:22 -05003896 shdr->SessionId = 0xFFFFFFFFFFFFFFFF;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003897 shdr->TreeId = 0xFFFFFFFF;
Steve Frenchc0d46712021-05-15 09:52:22 -05003898 req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
3899 req->VolatileFileId = 0xFFFFFFFFFFFFFFFF;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003900 }
3901 }
3902 if (remaining_bytes > io_parms->length)
3903 req->RemainingBytes = cpu_to_le32(remaining_bytes);
3904 else
3905 req->RemainingBytes = 0;
3906
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003907 *buf = req;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003908 return rc;
3909}
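
/*
 * Illustrative sketch only (not upstream code): the request_type flag
 * combinations implied by smb2_new_read_req() above for a three-element
 * read chain. io_parms (including io_parms->server) is assumed to be fully
 * populated by the caller; assembling and sending the compound request is
 * out of scope here, so the buffers are simply released again.
 */
static int __maybe_unused example_read_chain_flags(struct cifs_io_parms *io_parms)
{
	void *first = NULL, *middle = NULL, *last = NULL;
	unsigned int len1 = 0, len2 = 0, len3 = 0;
	int rc;

	/* first request: opens the chain */
	rc = smb2_new_read_req(&first, &len1, io_parms, NULL, 0,
			       CHAINED_REQUEST);
	if (rc)
		goto out;
	/* middle request: related to the previous request in the chain */
	rc = smb2_new_read_req(&middle, &len2, io_parms, NULL, 0,
			       CHAINED_REQUEST | RELATED_REQUEST);
	if (rc)
		goto out;
	/* last request: terminates the chain */
	rc = smb2_new_read_req(&last, &len3, io_parms, NULL, 0,
			       CHAINED_REQUEST | RELATED_REQUEST |
			       END_OF_CHAIN);
	cifs_dbg(FYI, "example chain lengths %u %u %u\n", len1, len2, len3);
out:
	cifs_small_buf_release(first);
	cifs_small_buf_release(middle);
	cifs_small_buf_release(last);
	return rc;
}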
3910
3911static void
3912smb2_readv_callback(struct mid_q_entry *mid)
3913{
3914 struct cifs_readdata *rdata = mid->callback_data;
3915 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003916 struct TCP_Server_Info *server = rdata->server;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003917 struct smb2_sync_hdr *shdr =
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003918 (struct smb2_sync_hdr *)rdata->iov[0].iov_base;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003919 struct cifs_credits credits = { .value = 0, .instance = 0 };
Steve French46f17d12019-09-04 23:07:52 -05003920 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
3921 .rq_nvec = 1,
Jeff Layton8321fec2012-09-19 06:22:32 -07003922 .rq_pages = rdata->pages,
Long Li1dbe3462018-05-30 12:47:55 -07003923 .rq_offset = rdata->page_offset,
Jeff Layton8321fec2012-09-19 06:22:32 -07003924 .rq_npages = rdata->nr_pages,
3925 .rq_pagesz = rdata->pagesz,
3926 .rq_tailsz = rdata->tailsz };
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003927
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003928 WARN_ONCE(rdata->server != mid->server,
3929 "rdata server %p != mid server %p",
3930 rdata->server, mid->server);
3931
Joe Perchesf96637b2013-05-04 22:12:25 -05003932 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
3933 __func__, mid->mid, mid->mid_state, rdata->result,
3934 rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003935
3936 switch (mid->mid_state) {
3937 case MID_RESPONSE_RECEIVED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003938 credits.value = le16_to_cpu(shdr->CreditRequest);
3939 credits.instance = server->reconnect_instance;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003940 /* result already set, check signature */
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003941 if (server->sign && !mid->decrypted) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07003942 int rc;
3943
Jeff Layton0b688cf2012-09-18 16:20:34 -07003944 rc = smb2_verify_signature(&rqst, server);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07003945 if (rc)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003946 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05003947 rc);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07003948 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003949 /* FIXME: should this be counted toward the initiating task? */
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003950 task_io_account_read(rdata->got_bytes);
3951 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003952 break;
3953 case MID_REQUEST_SUBMITTED:
3954 case MID_RETRY_NEEDED:
3955 rdata->result = -EAGAIN;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003956 if (server->sign && rdata->got_bytes)
3957 /* reset the byte count since we cannot verify the signature */
3958 rdata->got_bytes = 0;
3959 /* FIXME: should this be counted toward the initiating task? */
3960 task_io_account_read(rdata->got_bytes);
3961 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003962 break;
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08003963 case MID_RESPONSE_MALFORMED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003964 credits.value = le16_to_cpu(shdr->CreditRequest);
3965 credits.instance = server->reconnect_instance;
Miaohe Lin30b5ae22020-08-08 16:36:37 +08003966 fallthrough;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003967 default:
Pavel Shilovsky6b15eb12019-01-18 15:46:14 -08003968 rdata->result = -EIO;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003969 }
Long Libd3dcc62017-11-22 17:38:47 -07003970#ifdef CONFIG_CIFS_SMB_DIRECT
3971 /*
3972 * If this rdata has a memory region registered, the MR can be freed.
3973 * MRs are limited in number and reused for future I/O, so deregister
3974 * them as soon as the I/O finishes to prevent deadlock.
3975 */
3976 if (rdata->mr) {
3977 smbd_deregister_mr(rdata->mr);
3978 rdata->mr = NULL;
3979 }
3980#endif
Pavel Shilovsky082aaa82019-01-18 15:54:34 -08003981 if (rdata->result && rdata->result != -ENODATA) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003982 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08003983 trace_smb3_read_err(0 /* xid */,
3984 rdata->cfile->fid.persistent_fid,
3985 tcon->tid, tcon->ses->Suid, rdata->offset,
3986 rdata->bytes, rdata->result);
3987 } else
3988 trace_smb3_read_done(0 /* xid */,
3989 rdata->cfile->fid.persistent_fid,
3990 tcon->tid, tcon->ses->Suid,
3991 rdata->offset, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003992
3993 queue_work(cifsiod_wq, &rdata->work);
3994 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003995 add_credits(server, &credits, 0);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003996}
3997
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003998/* smb2_async_readv - send an async read, and set up mid to handle result */
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003999int
4000smb2_async_readv(struct cifs_readdata *rdata)
4001{
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004002 int rc, flags = 0;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004003 char *buf;
4004 struct smb2_sync_hdr *shdr;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004005 struct cifs_io_parms io_parms;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004006 struct smb_rqst rqst = { .rq_iov = rdata->iov,
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004007 .rq_nvec = 1 };
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004008 struct TCP_Server_Info *server;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004009 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004010 unsigned int total_len;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004011
Joe Perchesf96637b2013-05-04 22:12:25 -05004012 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
4013 __func__, rdata->offset, rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004014
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004015 if (!rdata->server)
4016 rdata->server = cifs_pick_channel(tcon->ses);
4017
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004018 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004019 io_parms.server = server = rdata->server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004020 io_parms.offset = rdata->offset;
4021 io_parms.length = rdata->bytes;
4022 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
4023 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
4024 io_parms.pid = rdata->pid;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004025
Long Li2dabfd52017-11-07 01:54:53 -07004026 rc = smb2_new_read_req(
4027 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
Pavel Shilovskyf0b93cb2019-01-25 11:10:00 -08004028 if (rc)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004029 return rc;
4030
Steve French5a77e752018-05-09 17:43:08 -05004031 if (smb3_encryption_required(io_parms.tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004032 flags |= CIFS_TRANSFORM_REQ;
4033
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004034 rdata->iov[0].iov_base = buf;
4035 rdata->iov[0].iov_len = total_len;
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08004036
4037 shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004038
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004039 if (rdata->credits.value > 0) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004040 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004041 SMB2_MAX_BUFFER_SIZE));
Aurelien Aptel88fd98a2021-03-04 17:51:48 +00004042 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004043
4044 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4045 if (rc)
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004046 goto async_readv_out;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004047
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004048 flags |= CIFS_HAS_CREDITS;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004049 }
4050
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004051 kref_get(&rdata->refcount);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004052 rc = cifs_call_async(server, &rqst,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004053 cifs_readv_receive, smb2_readv_callback,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08004054 smb3_handle_read_data, rdata, flags,
4055 &rdata->credits);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004056 if (rc) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004057 kref_put(&rdata->refcount, cifs_readdata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004058 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004059 trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
4060 io_parms.tcon->tid,
4061 io_parms.tcon->ses->Suid,
4062 io_parms.offset, io_parms.length, rc);
4063 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004064
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004065async_readv_out:
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004066 cifs_small_buf_release(buf);
4067 return rc;
4068}
Pavel Shilovsky33319142012-09-18 16:20:29 -07004069
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004070int
4071SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4072 unsigned int *nbytes, char **buf, int *buf_type)
4073{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004074 struct smb_rqst rqst;
Colin Ian King1efd4fc2019-07-31 10:05:26 +01004075 int resp_buftype, rc;
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08004076 struct smb2_read_plain_req *req = NULL;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004077 struct smb2_read_rsp *rsp = NULL;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004078 struct kvec iov[1];
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004079 struct kvec rsp_iov;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004080 unsigned int total_len;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004081 int flags = CIFS_LOG_ERROR;
4082 struct cifs_ses *ses = io_parms->tcon->ses;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004083
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004084 if (!io_parms->server)
4085 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4086
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004087 *nbytes = 0;
Long Li2dabfd52017-11-07 01:54:53 -07004088 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004089 if (rc)
4090 return rc;
4091
Steve French5a77e752018-05-09 17:43:08 -05004092 if (smb3_encryption_required(io_parms->tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004093 flags |= CIFS_TRANSFORM_REQ;
4094
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004095 iov[0].iov_base = (char *)req;
4096 iov[0].iov_len = total_len;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004097
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004098 memset(&rqst, 0, sizeof(struct smb_rqst));
4099 rqst.rq_iov = iov;
4100 rqst.rq_nvec = 1;
4101
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004102 rc = cifs_send_recv(xid, ses, io_parms->server,
4103 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004104 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004105
4106 if (rc) {
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004107 if (rc != -ENODATA) {
4108 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4109 cifs_dbg(VFS, "Send error in read = %d\n", rc);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004110 trace_smb3_read_err(xid, req->PersistentFileId,
4111 io_parms->tcon->tid, ses->Suid,
4112 io_parms->offset, io_parms->length,
4113 rc);
Steve Frenchb0a42f22019-02-25 15:02:58 -06004114 } else
4115 trace_smb3_read_done(xid, req->PersistentFileId,
4116 io_parms->tcon->tid, ses->Suid,
4117 io_parms->offset, 0);
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004118 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Ronnie Sahlberg05fd5c22019-04-23 16:39:45 +10004119 cifs_small_buf_release(req);
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004120 return rc == -ENODATA ? 0 : rc;
Steve Frencheccb4422018-05-17 21:16:55 -05004121 } else
4122 trace_smb3_read_done(xid, req->PersistentFileId,
4123 io_parms->tcon->tid, ses->Suid,
4124 io_parms->offset, io_parms->length);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004125
ZhangXiaoxu088aaf12019-04-06 15:47:39 +08004126 cifs_small_buf_release(req);
4127
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004128 *nbytes = le32_to_cpu(rsp->DataLength);
4129 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4130 (*nbytes > io_parms->length)) {
4131 cifs_dbg(FYI, "bad length %d for count %d\n",
4132 *nbytes, io_parms->length);
4133 rc = -EIO;
4134 *nbytes = 0;
4135 }
4136
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004137 if (*buf) {
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004138 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004139 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004140 } else if (resp_buftype != CIFS_NO_BUFFER) {
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004141 *buf = rsp_iov.iov_base;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004142 if (resp_buftype == CIFS_SMALL_BUFFER)
4143 *buf_type = CIFS_SMALL_BUFFER;
4144 else if (resp_buftype == CIFS_LARGE_BUFFER)
4145 *buf_type = CIFS_LARGE_BUFFER;
4146 }
4147 return rc;
4148}
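
/*
 * Minimal illustrative sketch (not upstream code), assuming xid, tcon, fid
 * and pid describe an already-opened file: a synchronous read through
 * SMB2_read() above. Passing a NULL buf asks SMB2_read() to hand back the
 * response buffer, which is then released with free_rsp_buf().
 */
static int __maybe_unused example_read_at(const unsigned int xid,
					  struct cifs_tcon *tcon,
					  struct cifs_fid *fid, __u32 pid,
					  __u64 offset, unsigned int len)
{
	struct cifs_io_parms io_parms = {
		.tcon = tcon,
		.offset = offset,
		.length = len,
		.persistent_fid = fid->persistent_fid,
		.volatile_fid = fid->volatile_fid,
		.pid = pid,
	};
	unsigned int nbytes = 0;
	char *buf = NULL;
	int buf_type = CIFS_NO_BUFFER;
	int rc;

	rc = SMB2_read(xid, &io_parms, &nbytes, &buf, &buf_type);
	if (!rc)
		cifs_dbg(FYI, "example read returned %u bytes\n", nbytes);
	free_rsp_buf(buf_type, buf); /* no-op when buf_type is CIFS_NO_BUFFER */
	return rc;
}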
4149
Pavel Shilovsky33319142012-09-18 16:20:29 -07004150/*
4151 * Check the mid_state and signature on received buffer (if any), and queue the
4152 * workqueue completion task.
4153 */
4154static void
4155smb2_writev_callback(struct mid_q_entry *mid)
4156{
4157 struct cifs_writedata *wdata = mid->callback_data;
4158 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004159 struct TCP_Server_Info *server = wdata->server;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004160 unsigned int written;
4161 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004162 struct cifs_credits credits = { .value = 0, .instance = 0 };
Pavel Shilovsky33319142012-09-18 16:20:29 -07004163
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004164 WARN_ONCE(wdata->server != mid->server,
4165 "wdata server %p != mid server %p",
4166 wdata->server, mid->server);
4167
Pavel Shilovsky33319142012-09-18 16:20:29 -07004168 switch (mid->mid_state) {
4169 case MID_RESPONSE_RECEIVED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004170 credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
4171 credits.instance = server->reconnect_instance;
4172 wdata->result = smb2_check_receive(mid, server, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004173 if (wdata->result != 0)
4174 break;
4175
4176 written = le32_to_cpu(rsp->DataLength);
4177 /*
4178 * Mask off high 16 bits when bytes written as returned
4179 * by the server is greater than bytes requested by the
4180 * client. OS/2 servers are known to set incorrect
4181 * CountHigh values.
4182 */
4183 if (written > wdata->bytes)
4184 written &= 0xFFFF;
4185
4186 if (written < wdata->bytes)
4187 wdata->result = -ENOSPC;
4188 else
4189 wdata->bytes = written;
4190 break;
4191 case MID_REQUEST_SUBMITTED:
4192 case MID_RETRY_NEEDED:
4193 wdata->result = -EAGAIN;
4194 break;
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08004195 case MID_RESPONSE_MALFORMED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004196 credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
4197 credits.instance = server->reconnect_instance;
Miaohe Lin30b5ae22020-08-08 16:36:37 +08004198 fallthrough;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004199 default:
4200 wdata->result = -EIO;
4201 break;
4202 }
Long Lidb223a52017-11-22 17:38:45 -07004203#ifdef CONFIG_CIFS_SMB_DIRECT
4204 /*
4205 * If this wdata has a memory region registered, the MR can be freed.
4206 * The number of MRs available is limited, so it is important to recover
4207 * a used MR as soon as the I/O finishes. Holding an MR into later
4208 * stages of I/O can result in deadlock due to a lack of MRs available
4209 * to send requests on I/O retry.
4210 */
4211 if (wdata->mr) {
4212 smbd_deregister_mr(wdata->mr);
4213 wdata->mr = NULL;
4214 }
4215#endif
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004216 if (wdata->result) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07004217 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004218 trace_smb3_write_err(0 /* no xid */,
4219 wdata->cfile->fid.persistent_fid,
4220 tcon->tid, tcon->ses->Suid, wdata->offset,
4221 wdata->bytes, wdata->result);
Steve Frenchd6fd4192020-02-05 16:52:11 -06004222 if (wdata->result == -ENOSPC)
Joe Perchesa0a30362020-04-14 22:42:53 -07004223 pr_warn_once("Out of space writing to %s\n",
4224 tcon->treeName);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004225 } else
4226 trace_smb3_write_done(0 /* no xid */,
4227 wdata->cfile->fid.persistent_fid,
4228 tcon->tid, tcon->ses->Suid,
4229 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004230
4231 queue_work(cifsiod_wq, &wdata->work);
4232 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004233 add_credits(server, &credits, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004234}
4235
4236/* smb2_async_writev - send an async write, and set up mid to handle result */
4237int
Steve French4a5c80d2014-02-07 20:45:12 -06004238smb2_async_writev(struct cifs_writedata *wdata,
4239 void (*release)(struct kref *kref))
Pavel Shilovsky33319142012-09-18 16:20:29 -07004240{
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004241 int rc = -EACCES, flags = 0;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004242 struct smb2_write_req *req = NULL;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004243 struct smb2_sync_hdr *shdr;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004244 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004245 struct TCP_Server_Info *server = wdata->server;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004246 struct kvec iov[1];
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004247 struct smb_rqst rqst = { };
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004248 unsigned int total_len;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004249
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004250 if (!wdata->server)
4251 server = wdata->server = cifs_pick_channel(tcon->ses);
4252
4253 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
4254 (void **) &req, &total_len);
Pavel Shilovskyf0b93cb2019-01-25 11:10:00 -08004255 if (rc)
4256 return rc;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004257
Steve French5a77e752018-05-09 17:43:08 -05004258 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004259 flags |= CIFS_TRANSFORM_REQ;
4260
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004261 shdr = (struct smb2_sync_hdr *)req;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004262 shdr->ProcessId = cpu_to_le32(wdata->cfile->pid);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004263
4264 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
4265 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
4266 req->WriteChannelInfoOffset = 0;
4267 req->WriteChannelInfoLength = 0;
4268 req->Channel = 0;
4269 req->Offset = cpu_to_le64(wdata->offset);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004270 req->DataOffset = cpu_to_le16(
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004271 offsetof(struct smb2_write_req, Buffer));
Pavel Shilovsky33319142012-09-18 16:20:29 -07004272 req->RemainingBytes = 0;
Steve Frenchd323c2462019-02-25 00:52:43 -06004273
4274 trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
4275 tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
Long Lidb223a52017-11-22 17:38:45 -07004276#ifdef CONFIG_CIFS_SMB_DIRECT
4277 /*
4278 * If we want to do a server RDMA read, fill in and append
4279 * smbd_buffer_descriptor_v1 to the end of the write request
4280 */
Long Libb4c0412018-04-17 12:17:08 -07004281 if (server->rdma && !server->sign && wdata->bytes >=
Long Lidb223a52017-11-22 17:38:45 -07004282 server->smbd_conn->rdma_readwrite_threshold) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07004283
Long Lidb223a52017-11-22 17:38:45 -07004284 struct smbd_buffer_descriptor_v1 *v1;
4285 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4286
4287 wdata->mr = smbd_register_mr(
4288 server->smbd_conn, wdata->pages,
Long Li7cf20bc2018-05-30 12:48:02 -07004289 wdata->nr_pages, wdata->page_offset,
4290 wdata->tailsz, false, need_invalidate);
Long Lidb223a52017-11-22 17:38:45 -07004291 if (!wdata->mr) {
Long Lib7972092019-04-05 21:36:34 +00004292 rc = -EAGAIN;
Long Lidb223a52017-11-22 17:38:45 -07004293 goto async_writev_out;
4294 }
4295 req->Length = 0;
4296 req->DataOffset = 0;
Long Li7cf20bc2018-05-30 12:48:02 -07004297 if (wdata->nr_pages > 1)
4298 req->RemainingBytes =
4299 cpu_to_le32(
4300 (wdata->nr_pages - 1) * wdata->pagesz -
4301 wdata->page_offset + wdata->tailsz
4302 );
4303 else
4304 req->RemainingBytes = cpu_to_le32(wdata->tailsz);
Long Lidb223a52017-11-22 17:38:45 -07004305 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4306 if (need_invalidate)
4307 req->Channel = SMB2_CHANNEL_RDMA_V1;
4308 req->WriteChannelInfoOffset =
Steve French2026b062018-01-24 23:07:41 -06004309 cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
Long Lidb223a52017-11-22 17:38:45 -07004310 req->WriteChannelInfoLength =
Steve French2026b062018-01-24 23:07:41 -06004311 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
Long Lidb223a52017-11-22 17:38:45 -07004312 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
Steve French2026b062018-01-24 23:07:41 -06004313 v1->offset = cpu_to_le64(wdata->mr->mr->iova);
4314 v1->token = cpu_to_le32(wdata->mr->mr->rkey);
4315 v1->length = cpu_to_le32(wdata->mr->mr->length);
Long Lidb223a52017-11-22 17:38:45 -07004316 }
4317#endif
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004318 iov[0].iov_len = total_len - 1;
4319 iov[0].iov_base = (char *)req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004320
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004321 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004322 rqst.rq_nvec = 1;
Jeff Laytoneddb0792012-09-18 16:20:35 -07004323 rqst.rq_pages = wdata->pages;
Long Li57a929a2018-05-30 12:47:53 -07004324 rqst.rq_offset = wdata->page_offset;
Jeff Laytoneddb0792012-09-18 16:20:35 -07004325 rqst.rq_npages = wdata->nr_pages;
4326 rqst.rq_pagesz = wdata->pagesz;
4327 rqst.rq_tailsz = wdata->tailsz;
Long Lidb223a52017-11-22 17:38:45 -07004328#ifdef CONFIG_CIFS_SMB_DIRECT
4329 if (wdata->mr) {
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004330 iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
Long Lidb223a52017-11-22 17:38:45 -07004331 rqst.rq_npages = 0;
4332 }
4333#endif
Joe Perchesf96637b2013-05-04 22:12:25 -05004334 cifs_dbg(FYI, "async write at %llu %u bytes\n",
4335 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004336
Long Lidb223a52017-11-22 17:38:45 -07004337#ifdef CONFIG_CIFS_SMB_DIRECT
4338 /* For RDMA read, I/O size is in RemainingBytes not in Length */
4339 if (!wdata->mr)
4340 req->Length = cpu_to_le32(wdata->bytes);
4341#else
Pavel Shilovsky33319142012-09-18 16:20:29 -07004342 req->Length = cpu_to_le32(wdata->bytes);
Long Lidb223a52017-11-22 17:38:45 -07004343#endif
Pavel Shilovsky33319142012-09-18 16:20:29 -07004344
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004345 if (wdata->credits.value > 0) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004346 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004347 SMB2_MAX_BUFFER_SIZE));
Aurelien Aptel88fd98a2021-03-04 17:51:48 +00004348 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004349
4350 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
4351 if (rc)
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004352 goto async_writev_out;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004353
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004354 flags |= CIFS_HAS_CREDITS;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004355 }
4356
Pavel Shilovsky33319142012-09-18 16:20:29 -07004357 kref_get(&wdata->refcount);
Pavel Shilovsky9b7c18a2016-11-16 14:06:17 -08004358 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08004359 wdata, flags, &wdata->credits);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004360
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004361 if (rc) {
Steve Frencheccb4422018-05-17 21:16:55 -05004362 trace_smb3_write_err(0 /* no xid */, req->PersistentFileId,
4363 tcon->tid, tcon->ses->Suid, wdata->offset,
4364 wdata->bytes, rc);
Steve French4a5c80d2014-02-07 20:45:12 -06004365 kref_put(&wdata->refcount, release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004366 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004367 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07004368
Pavel Shilovsky33319142012-09-18 16:20:29 -07004369async_writev_out:
4370 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004371 return rc;
4372}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004373
4374/*
4375 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
4376 * n_vec must be at least 1 and indicates the number of elements with data
4377 * to write, which begin at position 1 in the iov array. The total length of
4378 * the data is specified by io_parms->length.
4379 */
4380int
4381SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
4382 unsigned int *nbytes, struct kvec *iov, int n_vec)
4383{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004384 struct smb_rqst rqst;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004385 int rc = 0;
4386 struct smb2_write_req *req = NULL;
4387 struct smb2_write_rsp *rsp = NULL;
4388 int resp_buftype;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004389 struct kvec rsp_iov;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004390 int flags = 0;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004391 unsigned int total_len;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004392 struct TCP_Server_Info *server;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004393
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004394 *nbytes = 0;
4395
4396 if (n_vec < 1)
4397 return rc;
4398
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004399 if (!io_parms->server)
4400 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4401 server = io_parms->server;
4402 if (server == NULL)
4403 return -ECONNABORTED;
4404
4405 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
4406 (void **) &req, &total_len);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004407 if (rc)
4408 return rc;
4409
Steve French5a77e752018-05-09 17:43:08 -05004410 if (smb3_encryption_required(io_parms->tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004411 flags |= CIFS_TRANSFORM_REQ;
4412
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004413 req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004414
4415 req->PersistentFileId = io_parms->persistent_fid;
4416 req->VolatileFileId = io_parms->volatile_fid;
4417 req->WriteChannelInfoOffset = 0;
4418 req->WriteChannelInfoLength = 0;
4419 req->Channel = 0;
4420 req->Length = cpu_to_le32(io_parms->length);
4421 req->Offset = cpu_to_le64(io_parms->offset);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004422 req->DataOffset = cpu_to_le16(
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004423 offsetof(struct smb2_write_req, Buffer));
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004424 req->RemainingBytes = 0;
4425
Steve Frenchd323c2462019-02-25 00:52:43 -06004426 trace_smb3_write_enter(xid, io_parms->persistent_fid,
4427 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4428 io_parms->offset, io_parms->length);
4429
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004430 iov[0].iov_base = (char *)req;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004431 /* 1 for Buffer */
4432 iov[0].iov_len = total_len - 1;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004433
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004434 memset(&rqst, 0, sizeof(struct smb_rqst));
4435 rqst.rq_iov = iov;
4436 rqst.rq_nvec = n_vec + 1;
4437
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004438 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
4439 &rqst,
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004440 &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004441 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004442
4443 if (rc) {
Steve Frencheccb4422018-05-17 21:16:55 -05004444 trace_smb3_write_err(xid, req->PersistentFileId,
4445 io_parms->tcon->tid,
4446 io_parms->tcon->ses->Suid,
4447 io_parms->offset, io_parms->length, rc);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004448 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05004449 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Steve Frencheccb4422018-05-17 21:16:55 -05004450 } else {
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004451 *nbytes = le32_to_cpu(rsp->DataLength);
Steve Frencheccb4422018-05-17 21:16:55 -05004452 trace_smb3_write_done(xid, req->PersistentFileId,
4453 io_parms->tcon->tid,
4454 io_parms->tcon->ses->Suid,
4455 io_parms->offset, *nbytes);
4456 }
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004457
ZhangXiaoxu6a3eb332019-04-06 15:47:38 +08004458 cifs_small_buf_release(req);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004459 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004460 return rc;
4461}
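
/*
 * Minimal illustrative sketch (not upstream code), assuming xid, tcon, fid
 * and pid describe an already-opened file: how a caller lays out the kvec
 * array for SMB2_write() above. iov[0] is left for the request header that
 * SMB2_write() fills in itself; the data segment goes in iov[1], as the
 * comment before the function describes.
 */
static int __maybe_unused example_write_at(const unsigned int xid,
					   struct cifs_tcon *tcon,
					   struct cifs_fid *fid, __u32 pid,
					   __u64 offset, char *data,
					   unsigned int len)
{
	struct cifs_io_parms io_parms = {
		.tcon = tcon,
		.offset = offset,
		.length = len,
		.persistent_fid = fid->persistent_fid,
		.volatile_fid = fid->volatile_fid,
		.pid = pid,
	};
	struct kvec iov[2];
	unsigned int nbytes = 0;
	int rc;

	iov[1].iov_base = data;	/* data kvecs begin at index 1 */
	iov[1].iov_len = len;

	rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1 /* n_vec */);
	if (!rc)
		cifs_dbg(FYI, "example wrote %u of %u bytes\n", nbytes, len);
	return rc;
}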
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004462
Aurelien Aptel69dda302020-03-02 17:53:22 +01004463int posix_info_sid_size(const void *beg, const void *end)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004464{
4465 size_t subauth;
4466 int total;
4467
4468 if (beg + 1 > end)
4469 return -1;
4470
4471 subauth = *(u8 *)(beg+1);
4472 if (subauth < 1 || subauth > 15)
4473 return -1;
4474
4475 total = 1 + 1 + 6 + 4*subauth;
4476 if (beg + total > end)
4477 return -1;
4478
4479 return total;
4480}
4481
4482int posix_info_parse(const void *beg, const void *end,
4483 struct smb2_posix_info_parsed *out)
4484
4485{
4486 int total_len = 0;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004487 int owner_len, group_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004488 int name_len;
4489 const void *owner_sid;
4490 const void *group_sid;
4491 const void *name;
4492
4493 /* if no end bound given, assume payload to be correct */
4494 if (!end) {
4495 const struct smb2_posix_info *p = beg;
4496
4497 end = beg + le32_to_cpu(p->NextEntryOffset);
4498 /* last element will have a 0 offset, pick a sensible bound */
4499 if (end == beg)
4500 end += 0xFFFF;
4501 }
4502
4503 /* check base buf */
4504 if (beg + sizeof(struct smb2_posix_info) > end)
4505 return -1;
4506 total_len = sizeof(struct smb2_posix_info);
4507
4508 /* check owner sid */
4509 owner_sid = beg + total_len;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004510 owner_len = posix_info_sid_size(owner_sid, end);
4511 if (owner_len < 0)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004512 return -1;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004513 total_len += owner_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004514
4515 /* check group sid */
4516 group_sid = beg + total_len;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004517 group_len = posix_info_sid_size(group_sid, end);
4518 if (group_len < 0)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004519 return -1;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004520 total_len += group_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004521
4522 /* check name len */
4523 if (beg + total_len + 4 > end)
4524 return -1;
4525 name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
4526 if (name_len < 1 || name_len > 0xFFFF)
4527 return -1;
4528 total_len += 4;
4529
4530 /* check name */
4531 name = beg + total_len;
4532 if (name + name_len > end)
4533 return -1;
4534 total_len += name_len;
4535
4536 if (out) {
4537 out->base = beg;
4538 out->size = total_len;
4539 out->name_len = name_len;
4540 out->name = name;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004541 memcpy(&out->owner, owner_sid, owner_len);
4542 memcpy(&out->group, group_sid, group_len);
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004543 }
4544 return total_len;
4545}
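
/*
 * Illustrative sketch only (not upstream code), assuming buf and buf_len
 * delimit a server-returned payload of POSIX directory entries: walking the
 * variable-size entries with posix_info_parse() above. A real caller would
 * consume each parsed entry instead of just logging its name length.
 */
static void __maybe_unused example_walk_posix_entries(const void *buf,
						      size_t buf_len)
{
	const void *cur = buf;
	const void *end = buf + buf_len;

	while (cur < end) {
		struct smb2_posix_info_parsed parsed;
		const struct smb2_posix_info *p = cur;
		u32 next;

		if (posix_info_parse(cur, end, &parsed) < 0)
			break;	/* malformed entry, stop walking */

		cifs_dbg(FYI, "posix entry, name_len=%u\n",
			 (unsigned int)parsed.name_len);

		next = le32_to_cpu(p->NextEntryOffset);
		if (!next)
			break;	/* a zero NextEntryOffset ends the listing */
		cur += next;
	}
}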
4546
4547static int posix_info_extra_size(const void *beg, const void *end)
4548{
4549 int len = posix_info_parse(beg, end, NULL);
4550
4551 if (len < 0)
4552 return -1;
4553 return len - sizeof(struct smb2_posix_info);
4554}
4555
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004556static unsigned int
Aurelien Aptel3d519bd2020-02-08 15:50:58 +01004557num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
4558 size_t size)
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004559{
4560 int len;
4561 unsigned int entrycount = 0;
4562 unsigned int next_offset = 0;
Dan Carpenter56446f22018-09-06 12:48:22 +03004563 char *entryptr;
4564 FILE_DIRECTORY_INFO *dir_info;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004565
4566 if (bufstart == NULL)
4567 return 0;
4568
Dan Carpenter56446f22018-09-06 12:48:22 +03004569 entryptr = bufstart;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004570
4571 while (1) {
Dan Carpenter56446f22018-09-06 12:48:22 +03004572 if (entryptr + next_offset < entryptr ||
4573 entryptr + next_offset > end_of_buf ||
4574 entryptr + next_offset + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004575 cifs_dbg(VFS, "malformed search entry would overflow\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004576 break;
4577 }
4578
Dan Carpenter56446f22018-09-06 12:48:22 +03004579 entryptr = entryptr + next_offset;
4580 dir_info = (FILE_DIRECTORY_INFO *)entryptr;
4581
Aurelien Aptel3d519bd2020-02-08 15:50:58 +01004582 if (infotype == SMB_FIND_FILE_POSIX_INFO)
4583 len = posix_info_extra_size(entryptr, end_of_buf);
4584 else
4585 len = le32_to_cpu(dir_info->FileNameLength);
4586
4587 if (len < 0 ||
4588 entryptr + len < entryptr ||
Dan Carpenter56446f22018-09-06 12:48:22 +03004589 entryptr + len > end_of_buf ||
4590 entryptr + len + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004591 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
4592 end_of_buf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004593 break;
4594 }
4595
Dan Carpenter56446f22018-09-06 12:48:22 +03004596 *lastentry = entryptr;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004597 entrycount++;
4598
Dan Carpenter56446f22018-09-06 12:48:22 +03004599 next_offset = le32_to_cpu(dir_info->NextEntryOffset);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004600 if (!next_offset)
4601 break;
4602 }
4603
4604 return entrycount;
4605}
4606
4607/*
4608 * Readdir/FindFirst
4609 */
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004610int SMB2_query_directory_init(const unsigned int xid,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004611 struct cifs_tcon *tcon,
4612 struct TCP_Server_Info *server,
4613 struct smb_rqst *rqst,
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004614 u64 persistent_fid, u64 volatile_fid,
4615 int index, int info_level)
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004616{
4617 struct smb2_query_directory_req *req;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004618 unsigned char *bufptr;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004619 __le16 asterisk = cpu_to_le16('*');
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004620 unsigned int output_size = CIFSMaxBufSize -
4621 MAX_SMB2_CREATE_RESPONSE_SIZE -
4622 MAX_SMB2_CLOSE_RESPONSE_SIZE;
Ronnie Sahlberg7c00c3a2017-11-20 11:24:45 +11004623 unsigned int total_len;
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004624 struct kvec *iov = rqst->rq_iov;
4625 int len, rc;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004626
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004627 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
4628 (void **) &req, &total_len);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004629 if (rc)
4630 return rc;
4631
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004632 switch (info_level) {
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004633 case SMB_FIND_FILE_DIRECTORY_INFO:
4634 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004635 break;
4636 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
4637 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004638 break;
Aurelien Aptel3d519bd2020-02-08 15:50:58 +01004639 case SMB_FIND_FILE_POSIX_INFO:
4640 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
4641 break;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004642 default:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004643 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004644 info_level);
4645 return -EINVAL;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004646 }
4647
4648 req->FileIndex = cpu_to_le32(index);
4649 req->PersistentFileId = persistent_fid;
4650 req->VolatileFileId = volatile_fid;
4651
4652 len = 0x2;
4653 bufptr = req->Buffer;
4654 memcpy(bufptr, &asterisk, len);
4655
4656 req->FileNameOffset =
Ronnie Sahlberg7c00c3a2017-11-20 11:24:45 +11004657 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004658 req->FileNameLength = cpu_to_le16(len);
4659 /*
4660 * BB could be 30 bytes or so longer if we used SMB2 specific
4661 * buffer lengths, but this is safe and close enough.
4662 */
4663 output_size = min_t(unsigned int, output_size, server->maxBuf);
4664 output_size = min_t(unsigned int, output_size, 2 << 15);
4665 req->OutputBufferLength = cpu_to_le32(output_size);
4666
4667 iov[0].iov_base = (char *)req;
Ronnie Sahlberg7c00c3a2017-11-20 11:24:45 +11004668 /* 1 for Buffer */
4669 iov[0].iov_len = total_len - 1;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004670
4671 iov[1].iov_base = (char *)(req->Buffer);
4672 iov[1].iov_len = len;
4673
Steve Frenchd323c2462019-02-25 00:52:43 -06004674 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
4675 tcon->ses->Suid, index, output_size);
4676
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004677 return 0;
4678}
4679
4680void SMB2_query_directory_free(struct smb_rqst *rqst)
4681{
4682 if (rqst && rqst->rq_iov) {
4683 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4684 }
4685}
4686
4687int
Ronnie Sahlbergaf08f9e2020-01-08 13:08:05 +10004688smb2_parse_query_directory(struct cifs_tcon *tcon,
4689 struct kvec *rsp_iov,
4690 int resp_buftype,
4691 struct cifs_search_info *srch_inf)
4692{
4693 struct smb2_query_directory_rsp *rsp;
4694 size_t info_buf_size;
4695 char *end_of_smb;
4696 int rc;
4697
4698 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
4699
4700 switch (srch_inf->info_level) {
4701 case SMB_FIND_FILE_DIRECTORY_INFO:
4702 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
4703 break;
4704 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
4705 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
4706 break;
Aurelien Aptel3d519bd2020-02-08 15:50:58 +01004707 case SMB_FIND_FILE_POSIX_INFO:
4708 /* note that posix payloads are variable size */
4709 info_buf_size = sizeof(struct smb2_posix_info);
4710 break;
Ronnie Sahlbergaf08f9e2020-01-08 13:08:05 +10004711 default:
4712 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
4713 srch_inf->info_level);
4714 return -EINVAL;
4715 }
4716
4717 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
4718 le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
4719 info_buf_size);
Aurelien Aptel3d519bd2020-02-08 15:50:58 +01004720 if (rc) {
4721 cifs_tcon_dbg(VFS, "bad info payload");
Ronnie Sahlbergaf08f9e2020-01-08 13:08:05 +10004722 return rc;
Aurelien Aptel3d519bd2020-02-08 15:50:58 +01004723 }
Ronnie Sahlbergaf08f9e2020-01-08 13:08:05 +10004724
4725 srch_inf->unicode = true;
4726
4727 if (srch_inf->ntwrk_buf_start) {
4728 if (srch_inf->smallBuf)
4729 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
4730 else
4731 cifs_buf_release(srch_inf->ntwrk_buf_start);
4732 }
4733 srch_inf->ntwrk_buf_start = (char *)rsp;
4734 srch_inf->srch_entries_start = srch_inf->last_entry =
4735 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
4736 end_of_smb = rsp_iov->iov_len + (char *)rsp;
Aurelien Aptel3d519bd2020-02-08 15:50:58 +01004737
4738 srch_inf->entries_in_buffer = num_entries(
4739 srch_inf->info_level,
4740 srch_inf->srch_entries_start,
4741 end_of_smb,
4742 &srch_inf->last_entry,
4743 info_buf_size);
4744
Ronnie Sahlbergaf08f9e2020-01-08 13:08:05 +10004745 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
4746 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
4747 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
4748 srch_inf->srch_entries_start, srch_inf->last_entry);
4749 if (resp_buftype == CIFS_LARGE_BUFFER)
4750 srch_inf->smallBuf = false;
4751 else if (resp_buftype == CIFS_SMALL_BUFFER)
4752 srch_inf->smallBuf = true;
4753 else
Joe Perchesa0a30362020-04-14 22:42:53 -07004754 cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
Ronnie Sahlbergaf08f9e2020-01-08 13:08:05 +10004755
4756 return 0;
4757}
4758
4759int
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004760SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
4761 u64 persistent_fid, u64 volatile_fid, int index,
4762 struct cifs_search_info *srch_inf)
4763{
4764 struct smb_rqst rqst;
4765 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
4766 struct smb2_query_directory_rsp *rsp = NULL;
4767 int resp_buftype = CIFS_NO_BUFFER;
4768 struct kvec rsp_iov;
4769 int rc = 0;
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004770 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004771 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004772 int flags = 0;
4773
YueHaibingc4985c32020-01-17 10:57:17 +08004774 if (!ses || !(ses->server))
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004775 return -EIO;
4776
4777 if (smb3_encryption_required(tcon))
4778 flags |= CIFS_TRANSFORM_REQ;
4779
4780 memset(&rqst, 0, sizeof(struct smb_rqst));
4781 memset(&iov, 0, sizeof(iov));
4782 rqst.rq_iov = iov;
4783 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
4784
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004785 rc = SMB2_query_directory_init(xid, tcon, server,
4786 &rqst, persistent_fid,
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004787 volatile_fid, index,
4788 srch_inf->info_level);
4789 if (rc)
4790 goto qdir_exit;
4791
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004792 rc = cifs_send_recv(xid, ses, server,
4793 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004794 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004795
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004796 if (rc) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004797 if (rc == -ENODATA &&
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10004798 rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
Steve Frenchadb3b4e2019-02-25 13:51:11 -06004799 trace_smb3_query_dir_done(xid, persistent_fid,
4800 tcon->tid, tcon->ses->Suid, index, 0);
Pavel Shilovsky52755802014-08-18 20:49:57 +04004801 srch_inf->endOfSearch = true;
4802 rc = 0;
Steve Frenchadb3b4e2019-02-25 13:51:11 -06004803 } else {
4804 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
4805 tcon->ses->Suid, index, 0, rc);
Pavel Shilovsky8e6e72a2019-01-26 12:21:32 -08004806 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
Steve Frenchadb3b4e2019-02-25 13:51:11 -06004807 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004808 goto qdir_exit;
4809 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004810
Ronnie Sahlbergaf08f9e2020-01-08 13:08:05 +10004811 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
4812 srch_inf);
Steve Frenchadb3b4e2019-02-25 13:51:11 -06004813 if (rc) {
4814 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
4815 tcon->ses->Suid, index, 0, rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004816 goto qdir_exit;
Steve Frenchadb3b4e2019-02-25 13:51:11 -06004817 }
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004818 resp_buftype = CIFS_NO_BUFFER;
4819
Steve Frenchadb3b4e2019-02-25 13:51:11 -06004820 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
4821 tcon->ses->Suid, index, srch_inf->entries_in_buffer);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004822
4823qdir_exit:
Ronnie Sahlberg0a177992020-01-08 13:08:04 +10004824 SMB2_query_directory_free(&rqst);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004825 free_rsp_buf(resp_buftype, rsp);
4826 return rc;
4827}
4828
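/*
 * Fill in an SMB2 SET_INFO request.  The first data buffer is copied
 * inline after the fixed part of the request (iov[0]); any additional
 * buffers (rqst->rq_nvec > 1) are chained as separate iovecs and only
 * accounted for in BufferLength.
 */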
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004829int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004830SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4831 struct smb_rqst *rqst,
4832 u64 persistent_fid, u64 volatile_fid, u32 pid,
4833 u8 info_class, u8 info_type, u32 additional_info,
4834 void **data, unsigned int *size)
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004835{
4836 struct smb2_set_info_req *req;
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004837 struct kvec *iov = rqst->rq_iov;
4838 unsigned int i, total_len;
4839 int rc;
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004840
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004841 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
4842 (void **) &req, &total_len);
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004843 if (rc)
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004844 return rc;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004845
Ronnie Sahlberg2fc803e2017-11-20 11:24:44 +11004846 req->sync_hdr.ProcessId = cpu_to_le32(pid);
Shirish Pargaonkardac95342017-06-28 22:37:00 -05004847 req->InfoType = info_type;
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004848 req->FileInfoClass = info_class;
4849 req->PersistentFileId = persistent_fid;
4850 req->VolatileFileId = volatile_fid;
Shirish Pargaonkardac95342017-06-28 22:37:00 -05004851 req->AdditionalInformation = cpu_to_le32(additional_info);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004852
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004853 req->BufferOffset =
Ronnie Sahlberg2fc803e2017-11-20 11:24:44 +11004854 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004855 req->BufferLength = cpu_to_le32(*size);
4856
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004857 memcpy(req->Buffer, *data, *size);
Ronnie Sahlberg2fc803e2017-11-20 11:24:44 +11004858 total_len += *size;
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004859
4860 iov[0].iov_base = (char *)req;
Ronnie Sahlberg2fc803e2017-11-20 11:24:44 +11004861 /* 1 for Buffer */
4862 iov[0].iov_len = total_len - 1;
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004863
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004864 for (i = 1; i < rqst->rq_nvec; i++) {
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004865 le32_add_cpu(&req->BufferLength, size[i]);
4866 iov[i].iov_base = (char *)data[i];
4867 iov[i].iov_len = size[i];
4868 }
4869
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004870 return 0;
4871}
4872
4873void
4874SMB2_set_info_free(struct smb_rqst *rqst)
4875{
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10004876 if (rqst && rqst->rq_iov)
4877 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004878}
4879
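/*
 * Common helper for the SMB2_set_* wrappers below: allocates the iovec
 * array, builds a SET_INFO request carrying @num data buffers, sends it
 * on a picked channel and frees the request afterwards.
 */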
4880static int
4881send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
4882 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
4883 u8 info_type, u32 additional_info, unsigned int num,
4884 void **data, unsigned int *size)
4885{
4886 struct smb_rqst rqst;
4887 struct smb2_set_info_rsp *rsp = NULL;
4888 struct kvec *iov;
4889 struct kvec rsp_iov;
4890 int rc = 0;
4891 int resp_buftype;
4892 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004893 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004894 int flags = 0;
4895
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004896 if (!ses || !server)
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004897 return -EIO;
4898
4899 if (!num)
4900 return -EINVAL;
4901
4902 if (smb3_encryption_required(tcon))
4903 flags |= CIFS_TRANSFORM_REQ;
4904
4905 iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
4906 if (!iov)
4907 return -ENOMEM;
4908
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004909 memset(&rqst, 0, sizeof(struct smb_rqst));
4910 rqst.rq_iov = iov;
4911 rqst.rq_nvec = num;
4912
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004913 rc = SMB2_set_info_init(tcon, server,
4914 &rqst, persistent_fid, volatile_fid, pid,
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004915 info_class, info_type, additional_info,
4916 data, size);
4917 if (rc) {
4918 kfree(iov);
4919 return rc;
4920 }
4921
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004923 rc = cifs_send_recv(xid, ses, server,
4924 &rqst, &resp_buftype, flags,
Ronnie Sahlberg2fc803e2017-11-20 11:24:44 +11004925 &rsp_iov);
Ronnie Sahlbergba8ca112018-09-03 13:33:44 +10004926 SMB2_set_info_free(&rqst);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004927 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004928
Steve Frencheccb4422018-05-17 21:16:55 -05004929 if (rc != 0) {
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004930 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05004931 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
4932 ses->Suid, info_class, (__u32)info_type, rc);
4933 }
Steve French7d3fb242013-11-18 09:56:28 -06004934
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004935 free_rsp_buf(resp_buftype, rsp);
4936 kfree(iov);
4937 return rc;
4938}
4939
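/*
 * Set the end-of-file (size) of an open file via
 * FILE_END_OF_FILE_INFORMATION.  For example, a caller truncating a
 * file would pass the handle's persistent/volatile ids and the new
 * size as a __le64.
 */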
4940int
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004941SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10004942 u64 volatile_fid, u32 pid, __le64 *eof)
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004943{
4944 struct smb2_file_eof_info info;
4945 void *data;
4946 unsigned int size;
4947
4948 info.EndOfFile = *eof;
4949
4950 data = &info;
4951 size = sizeof(struct smb2_file_eof_info);
4952
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10004953 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
Shirish Pargaonkardac95342017-06-28 22:37:00 -05004954 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
4955 0, 1, &data, &size);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004956}
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004957
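/*
 * Set the security descriptor on an open handle; aclflag is passed
 * through as the AdditionalInformation field of the SET_INFO request.
 */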
4958int
Shirish Pargaonkardac95342017-06-28 22:37:00 -05004959SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
4960 u64 persistent_fid, u64 volatile_fid,
4961 struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
4962{
4963 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
4964 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
4965 1, (void **)&pnntsd, &pacllen);
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004966}
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004967
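/* Set extended attributes on an open handle via FILE_FULL_EA_INFORMATION. */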
4968int
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004969SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
4970 u64 persistent_fid, u64 volatile_fid,
4971 struct smb2_file_full_ea_info *buf, int len)
4972{
4973 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
4974 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
4975 0, 1, (void **)&buf, &len);
4976}
4977
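/*
 * Acknowledge an oplock break from the server by sending an
 * SMB2_OPLOCK_BREAK with the client's new oplock level.  No response
 * buffer is expected (CIFS_NO_RSP_BUF).
 */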
4978int
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004979SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
4980 const u64 persistent_fid, const u64 volatile_fid,
4981 __u8 oplock_level)
4982{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004983 struct smb_rqst rqst;
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004984 int rc;
Ronnie Sahlberg0d5a2882018-06-01 10:53:03 +10004985 struct smb2_oplock_break *req = NULL;
Ronnie Sahlberg21ad9482017-11-20 11:24:43 +11004986 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004987 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004988 int flags = CIFS_OBREAK_OP;
Ronnie Sahlberg21ad9482017-11-20 11:24:43 +11004989 unsigned int total_len;
4990 struct kvec iov[1];
4991 struct kvec rsp_iov;
4992 int resp_buf_type;
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004993
Joe Perchesf96637b2013-05-04 22:12:25 -05004994 cifs_dbg(FYI, "SMB2_oplock_break\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004995 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
4996 (void **) &req, &total_len);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004997 if (rc)
4998 return rc;
4999
Steve French5a77e752018-05-09 17:43:08 -05005000 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005001 flags |= CIFS_TRANSFORM_REQ;
5002
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005003 req->VolatileFid = volatile_fid;
5004 req->PersistentFid = persistent_fid;
5005 req->OplockLevel = oplock_level;
Ronnie Sahlberg21ad9482017-11-20 11:24:43 +11005006 req->sync_hdr.CreditRequest = cpu_to_le16(1);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005007
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10005008 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg21ad9482017-11-20 11:24:43 +11005009
5010 iov[0].iov_base = (char *)req;
5011 iov[0].iov_len = total_len;
5012
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005013 memset(&rqst, 0, sizeof(struct smb_rqst));
5014 rqst.rq_iov = iov;
5015 rqst.rq_nvec = 1;
5016
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005017 rc = cifs_send_recv(xid, ses, server,
5018 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005019 cifs_small_buf_release(req);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005020
5021 if (rc) {
5022 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05005023 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005024 }
5025
5026 return rc;
5027}
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005028
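/*
 * Translate an SMB2 FS_FULL_SIZE_INFORMATION reply into struct kstatfs:
 * block size is BytesPerSector * SectorsPerAllocationUnit, and free
 * space is reported from the caller-available allocation units.
 */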
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10005029void
5030smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
5031 struct kstatfs *kst)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005032{
5033 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
5034 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
5035 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
Sachin Prabhu42bec212017-08-03 13:09:03 +05305036 kst->f_bfree = kst->f_bavail =
5037 le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005038 return;
5039}
5040
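/*
 * Translate FILE_SYSTEM_POSIX_INFO into struct kstatfs.  Fields the
 * server reports as -1 are treated as "not provided": f_bavail falls
 * back to f_bfree, and f_files/f_ffree are left unset.
 */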
Steve French2d304212018-06-24 23:28:12 -05005041static void
5042copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
5043 struct kstatfs *kst)
5044{
5045 kst->f_bsize = le32_to_cpu(response_data->BlockSize);
5046 kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
5047 kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
5048 if (response_data->UserBlocksAvail == cpu_to_le64(-1))
5049 kst->f_bavail = kst->f_bfree;
5050 else
5051 kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
5052 if (response_data->TotalFileNodes != cpu_to_le64(-1))
5053 kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
5054 if (response_data->FreeFileNodes != cpu_to_le64(-1))
5055 kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
5056
5057 return;
5058}
Steve French2d304212018-06-24 23:28:12 -05005059
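/*
 * Build an SMB2 QUERY_INFO request for filesystem information
 * (SMB2_O_INFO_FILESYSTEM) at the given info level, sized so the
 * response can carry up to outbuf_len bytes of data.
 */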
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005060static int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005061build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
5062 struct TCP_Server_Info *server,
5063 int level, int outbuf_len, u64 persistent_fid,
5064 u64 volatile_fid)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005065{
5066 int rc;
5067 struct smb2_query_info_req *req;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11005068 unsigned int total_len;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005069
Joe Perchesf96637b2013-05-04 22:12:25 -05005070 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005071
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005072 if ((tcon->ses == NULL) || server == NULL)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005073 return -EIO;
5074
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005075 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
5076 (void **) &req, &total_len);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005077 if (rc)
5078 return rc;
5079
5080 req->InfoType = SMB2_O_INFO_FILESYSTEM;
5081 req->FileInfoClass = level;
5082 req->PersistentFileId = persistent_fid;
5083 req->VolatileFileId = volatile_fid;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11005084 /* 1 for pad */
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005085 req->InputBufferOffset =
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11005086 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005087 req->OutputBufferLength = cpu_to_le32(
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005088 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005089
5090 iov->iov_base = (char *)req;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11005091 iov->iov_len = total_len;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005092 return 0;
5093}
5094
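/* Query FS_POSIX_INFORMATION (SMB3.1.1 POSIX extensions) and fill @fsdata. */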
Steve French2d304212018-06-24 23:28:12 -05005095int
5096SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
5097 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5098{
5099 struct smb_rqst rqst;
5100 struct smb2_query_info_rsp *rsp = NULL;
5101 struct kvec iov;
5102 struct kvec rsp_iov;
5103 int rc = 0;
5104 int resp_buftype;
5105 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005106 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve French2d304212018-06-24 23:28:12 -05005107 FILE_SYSTEM_POSIX_INFO *info = NULL;
5108 int flags = 0;
5109
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005110 rc = build_qfs_info_req(&iov, tcon, server,
5111 FS_POSIX_INFORMATION,
Steve French2d304212018-06-24 23:28:12 -05005112 sizeof(FILE_SYSTEM_POSIX_INFO),
5113 persistent_fid, volatile_fid);
5114 if (rc)
5115 return rc;
5116
5117 if (smb3_encryption_required(tcon))
5118 flags |= CIFS_TRANSFORM_REQ;
5119
5120 memset(&rqst, 0, sizeof(struct smb_rqst));
5121 rqst.rq_iov = &iov;
5122 rqst.rq_nvec = 1;
5123
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005124 rc = cifs_send_recv(xid, ses, server,
5125 &rqst, &resp_buftype, flags, &rsp_iov);
Steve French2d304212018-06-24 23:28:12 -05005126 cifs_small_buf_release(iov.iov_base);
5127 if (rc) {
5128 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
5129 goto posix_qfsinf_exit;
5130 }
5131 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5132
5133 info = (FILE_SYSTEM_POSIX_INFO *)(
5134 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10005135 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5136 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5137 sizeof(FILE_SYSTEM_POSIX_INFO));
Steve French2d304212018-06-24 23:28:12 -05005138 if (!rc)
5139 copy_posix_fs_info_to_kstatfs(info, fsdata);
5140
5141posix_qfsinf_exit:
5142 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
5143 return rc;
5144}
Steve French2d304212018-06-24 23:28:12 -05005145
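/* Query FS_FULL_SIZE_INFORMATION and fill @fsdata for statfs(2). */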
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005146int
5147SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
5148 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5149{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005150 struct smb_rqst rqst;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005151 struct smb2_query_info_rsp *rsp = NULL;
5152 struct kvec iov;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005153 struct kvec rsp_iov;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005154 int rc = 0;
5155 int resp_buftype;
5156 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005157 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005158 struct smb2_fs_full_size_info *info = NULL;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005159 int flags = 0;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005160
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005161 rc = build_qfs_info_req(&iov, tcon, server,
5162 FS_FULL_SIZE_INFORMATION,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005163 sizeof(struct smb2_fs_full_size_info),
5164 persistent_fid, volatile_fid);
5165 if (rc)
5166 return rc;
5167
Steve French5a77e752018-05-09 17:43:08 -05005168 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005169 flags |= CIFS_TRANSFORM_REQ;
5170
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005171 memset(&rqst, 0, sizeof(struct smb_rqst));
5172 rqst.rq_iov = &iov;
5173 rqst.rq_nvec = 1;
5174
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005175 rc = cifs_send_recv(xid, ses, server,
5176 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005177 cifs_small_buf_release(iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005178 if (rc) {
5179 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
Steve French34f62642013-10-09 02:07:00 -05005180 goto qfsinf_exit;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005181 }
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005182 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005183
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005184 info = (struct smb2_fs_full_size_info *)(
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10005185 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10005186 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5187 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5188 sizeof(struct smb2_fs_full_size_info));
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005189 if (!rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10005190 smb2_copy_fs_info_to_kstatfs(info, fsdata);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005191
Steve French34f62642013-10-09 02:07:00 -05005192qfsinf_exit:
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005193 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Steve French34f62642013-10-09 02:07:00 -05005194 return rc;
5195}
5196
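/*
 * Query one of the filesystem attribute levels and cache the result in
 * the tcon: FS_ATTRIBUTE_INFORMATION, FS_DEVICE_INFORMATION, sector
 * size info, or volume info (serial number and creation time).
 */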
5197int
5198SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
Steven French21671142013-10-09 13:36:35 -05005199 u64 persistent_fid, u64 volatile_fid, int level)
Steve French34f62642013-10-09 02:07:00 -05005200{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005201 struct smb_rqst rqst;
Steve French34f62642013-10-09 02:07:00 -05005202 struct smb2_query_info_rsp *rsp = NULL;
5203 struct kvec iov;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005204 struct kvec rsp_iov;
Steve French34f62642013-10-09 02:07:00 -05005205 int rc = 0;
Steven French21671142013-10-09 13:36:35 -05005206 int resp_buftype, max_len, min_len;
Steve French34f62642013-10-09 02:07:00 -05005207 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005208 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve French34f62642013-10-09 02:07:00 -05005209 unsigned int rsp_len, offset;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005210 int flags = 0;
Steve French34f62642013-10-09 02:07:00 -05005211
Steven French21671142013-10-09 13:36:35 -05005212 if (level == FS_DEVICE_INFORMATION) {
5213 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
5214 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
5215 } else if (level == FS_ATTRIBUTE_INFORMATION) {
5216 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
5217 min_len = MIN_FS_ATTR_INFO_SIZE;
Steven Frenchaf6a12e2013-10-09 20:55:53 -05005218 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
5219 max_len = sizeof(struct smb3_fs_ss_info);
5220 min_len = sizeof(struct smb3_fs_ss_info);
Steve French21ba3842018-06-24 23:18:52 -05005221 } else if (level == FS_VOLUME_INFORMATION) {
5222 max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
5223 min_len = sizeof(struct smb3_fs_vol_info);
Steven French21671142013-10-09 13:36:35 -05005224 } else {
Steven Frenchaf6a12e2013-10-09 20:55:53 -05005225 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
Steven French21671142013-10-09 13:36:35 -05005226 return -EINVAL;
5227 }
5228
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005229 rc = build_qfs_info_req(&iov, tcon, server,
5230 level, max_len,
Steve French34f62642013-10-09 02:07:00 -05005231 persistent_fid, volatile_fid);
5232 if (rc)
5233 return rc;
5234
Steve French5a77e752018-05-09 17:43:08 -05005235 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005236 flags |= CIFS_TRANSFORM_REQ;
5237
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005238 memset(&rqst, 0, sizeof(struct smb_rqst));
5239 rqst.rq_iov = &iov;
5240 rqst.rq_nvec = 1;
5241
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005242 rc = cifs_send_recv(xid, ses, server,
5243 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005244 cifs_small_buf_release(iov.iov_base);
Steve French34f62642013-10-09 02:07:00 -05005245 if (rc) {
5246 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
5247 goto qfsattr_exit;
5248 }
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005249 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Steve French34f62642013-10-09 02:07:00 -05005250
5251 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
5252 offset = le16_to_cpu(rsp->OutputBufferOffset);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10005253 rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
Steven French21671142013-10-09 13:36:35 -05005254 if (rc)
5255 goto qfsattr_exit;
5256
5257 if (level == FS_ATTRIBUTE_INFORMATION)
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005258 memcpy(&tcon->fsAttrInfo, offset
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10005259 + (char *)rsp, min_t(unsigned int,
Steven French21671142013-10-09 13:36:35 -05005260 rsp_len, max_len));
5261 else if (level == FS_DEVICE_INFORMATION)
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005262 memcpy(&tcon->fsDevInfo, offset
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10005263 + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
Steven Frenchaf6a12e2013-10-09 20:55:53 -05005264 else if (level == FS_SECTOR_SIZE_INFORMATION) {
5265 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005266 (offset + (char *)rsp);
Steven Frenchaf6a12e2013-10-09 20:55:53 -05005267 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
5268 tcon->perf_sector_size =
5269 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
Steve French21ba3842018-06-24 23:18:52 -05005270 } else if (level == FS_VOLUME_INFORMATION) {
5271 struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
5272 (offset + (char *)rsp);
5273 tcon->vol_serial_number = vol_info->VolumeSerialNumber;
5274 tcon->vol_create_time = vol_info->VolumeCreationTime;
Steven Frenchaf6a12e2013-10-09 20:55:53 -05005275 }
Steve French34f62642013-10-09 02:07:00 -05005276
5277qfsattr_exit:
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005278 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005279 return rc;
5280}
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005281
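/*
 * Send an SMB2 LOCK request carrying an array of num_lock
 * smb2_lock_element structures (byte-range locks and/or unlocks) for
 * the given open handle, on behalf of process @pid.
 */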
5282int
5283smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
5284 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
5285 const __u32 num_lock, struct smb2_lock_element *buf)
5286{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005287 struct smb_rqst rqst;
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005288 int rc = 0;
5289 struct smb2_lock_req *req = NULL;
5290 struct kvec iov[2];
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005291 struct kvec rsp_iov;
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005292 int resp_buf_type;
5293 unsigned int count;
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10005294 int flags = CIFS_NO_RSP_BUF;
Ronnie Sahlbergced93672017-11-21 10:07:27 +11005295 unsigned int total_len;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005296 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005297
Joe Perchesf96637b2013-05-04 22:12:25 -05005298 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005299
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005300 rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
5301 (void **) &req, &total_len);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005302 if (rc)
5303 return rc;
5304
Steve French5a77e752018-05-09 17:43:08 -05005305 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005306 flags |= CIFS_TRANSFORM_REQ;
5307
Ronnie Sahlbergced93672017-11-21 10:07:27 +11005308 req->sync_hdr.ProcessId = cpu_to_le32(pid);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005309 req->LockCount = cpu_to_le16(num_lock);
5310
5311 req->PersistentFileId = persist_fid;
5312 req->VolatileFileId = volatile_fid;
5313
5314 count = num_lock * sizeof(struct smb2_lock_element);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005315
5316 iov[0].iov_base = (char *)req;
Ronnie Sahlbergced93672017-11-21 10:07:27 +11005317 iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005318 iov[1].iov_base = (char *)buf;
5319 iov[1].iov_len = count;
5320
5321 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005322
5323 memset(&rqst, 0, sizeof(struct smb_rqst));
5324 rqst.rq_iov = iov;
5325 rqst.rq_nvec = 2;
5326
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005327 rc = cifs_send_recv(xid, tcon->ses, server,
5328 &rqst, &resp_buf_type, flags,
Ronnie Sahlbergced93672017-11-21 10:07:27 +11005329 &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005330 cifs_small_buf_release(req);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005331 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05005332 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005333 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05005334 trace_smb3_lock_err(xid, persist_fid, tcon->tid,
5335 tcon->ses->Suid, rc);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005336 }
5337
5338 return rc;
5339}
5340
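/*
 * Convenience wrapper for a single byte-range lock.  Unless the caller
 * is willing to wait, or is unlocking, SMB2_LOCKFLAG_FAIL_IMMEDIATELY
 * is added so the server fails a conflicting lock instead of blocking.
 */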
5341int
5342SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
5343 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
5344 const __u64 length, const __u64 offset, const __u32 lock_flags,
5345 const bool wait)
5346{
5347 struct smb2_lock_element lock;
5348
5349 lock.Offset = cpu_to_le64(offset);
5350 lock.Length = cpu_to_le64(length);
5351 lock.Flags = cpu_to_le32(lock_flags);
5352 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
5353 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
5354
5355 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
5356}
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005357
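/*
 * Acknowledge a lease break: echo the 16-byte lease key back to the
 * server along with the lease state the client will keep.
 */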
5358int
5359SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
5360 __u8 *lease_key, const __le32 lease_state)
5361{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005362 struct smb_rqst rqst;
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005363 int rc;
5364 struct smb2_lease_ack *req = NULL;
Ronnie Sahlberg8eb79982017-11-21 11:04:37 +11005365 struct cifs_ses *ses = tcon->ses;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005366 int flags = CIFS_OBREAK_OP;
Ronnie Sahlberg8eb79982017-11-21 11:04:37 +11005367 unsigned int total_len;
5368 struct kvec iov[1];
5369 struct kvec rsp_iov;
5370 int resp_buf_type;
Steve French179e44d2018-09-28 19:44:23 -05005371 __u64 *please_key_high;
5372 __u64 *please_key_low;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005373 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005374
Joe Perchesf96637b2013-05-04 22:12:25 -05005375 cifs_dbg(FYI, "SMB2_lease_break\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005376 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
5377 (void **) &req, &total_len);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005378 if (rc)
5379 return rc;
5380
Steve French5a77e752018-05-09 17:43:08 -05005381 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07005382 flags |= CIFS_TRANSFORM_REQ;
5383
Ronnie Sahlberg8eb79982017-11-21 11:04:37 +11005384 req->sync_hdr.CreditRequest = cpu_to_le16(1);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005385 req->StructureSize = cpu_to_le16(36);
Ronnie Sahlberg8eb79982017-11-21 11:04:37 +11005386 total_len += 12;
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005387
5388 memcpy(req->LeaseKey, lease_key, 16);
5389 req->LeaseState = lease_state;
5390
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10005391 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg8eb79982017-11-21 11:04:37 +11005392
5393 iov[0].iov_base = (char *)req;
5394 iov[0].iov_len = total_len;
5395
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10005396 memset(&rqst, 0, sizeof(struct smb_rqst));
5397 rqst.rq_iov = iov;
5398 rqst.rq_nvec = 1;
5399
Aurelien Aptel352d96f2020-05-31 12:38:22 -05005400 rc = cifs_send_recv(xid, ses, server,
5401 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07005402 cifs_small_buf_release(req);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005403
Aurelien Apteld339adc2019-01-31 13:46:07 +01005404 please_key_low = (__u64 *)lease_key;
5405 please_key_high = (__u64 *)(lease_key+8);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005406 if (rc) {
5407 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Steve French179e44d2018-09-28 19:44:23 -05005408 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
5409 ses->Suid, *please_key_low, *please_key_high, rc);
Joe Perchesf96637b2013-05-04 22:12:25 -05005410 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
Steve French179e44d2018-09-28 19:44:23 -05005411 } else
5412 trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
5413 ses->Suid, *please_key_low, *please_key_high);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07005414
5415 return rc;
5416}