// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2013
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 */

 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
 /* Note that there are handle based routines which must be */
 /* treated slightly differently for reconnection purposes since we never */
 /* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

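/*
 * Return 1 if requests on this tcon must be encrypted ("sealed"): either the
 * server flagged the session or share for encryption, or the mount requested
 * sealing and the server advertises SMB3 encryption support.
 */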
int smb3_encryption_required(const struct cifs_tcon *tcon)
{
	if (!tcon || !tcon->ses)
		return 0;
	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
		return 1;
	if (tcon->seal &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	return 0;
}

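/*
 * Fill in the fixed 64 byte SMB2 header for a request: protocol id, command,
 * credit request/charge, process and tree ids, session id, and the signing
 * flag when the connection signs and the tcon is not encrypted.
 */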
static void
smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
		  const struct cifs_tcon *tcon,
		  struct TCP_Server_Info *server)
{
	shdr->ProtocolId = SMB2_PROTO_NUMBER;
	shdr->StructureSize = cpu_to_le16(64);
	shdr->Command = smb2_cmd;
	if (server) {
		spin_lock(&server->req_lock);
		/* Request up to 10 credits but don't go over the limit. */
		if (server->credits >= server->max_credits)
			shdr->CreditRequest = cpu_to_le16(0);
		else
			shdr->CreditRequest = cpu_to_le16(
				min_t(int, server->max_credits -
						server->credits, 10));
		spin_unlock(&server->req_lock);
	} else {
		shdr->CreditRequest = cpu_to_le16(2);
	}
	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		shdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
	/* Uid is not converted */
	if (tcon->ses)
		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to not set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (server && server->sign && !smb3_encryption_required(tcon))
		shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	return;
}

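/*
 * Reconnect the transport, session and tree connection, if needed, before
 * sending smb2_command on this tcon.  Commands that are part of teardown
 * (tree disconnect, cancel, close, oplock break) return -EAGAIN rather than
 * waiting for the reconnect, and after a reconnect the handle based commands
 * return -EAGAIN since only the caller can reopen the stale file handle.
 */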
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server)
{
	int rc;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	int retries;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return 0;

	/*
	 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
	 * cifs_tree_connect().
	 */
	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
		return 0;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server) || !server)
		return -EIO;

	ses = tcon->ses;
	retries = server->nr_targets;

	/*
	 * Give demultiplex thread up to 10 seconds for each target available
	 * for reconnect -- should be greater than cifs socket timeout which
	 * is 7 seconds.
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      10 * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		if (retries && --retries)
			continue;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
		retries = server->nr_targets;
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return 0;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);

	/*
	 * Recheck after acquire mutex. If another thread is negotiating
	 * and the server never sends an answer the socket will be closed
	 * and tcpStatus set to reconnect.
	 */
	if (server->tcpStatus == CifsNeedReconnect) {
		rc = -EHOSTDOWN;
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	/*
	 * If we are reconnecting an extra channel, bind
	 */
	if (server->is_channel) {
		ses->binding = true;
		ses->binding_chan = cifs_ses_find_chan(ses, server);
	}

	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect) {
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);
		if ((rc == -EACCES) && !tcon->retry) {
			rc = -EHOSTDOWN;
			ses->binding = false;
			ses->binding_chan = NULL;
			mutex_unlock(&tcon->ses->session_mutex);
			goto failed;
		}
	}
	/*
	 * End of channel binding
	 */
	ses->binding = false;
	ses->binding_chan = NULL;

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	if (tcon->use_persistent)
		tcon->need_reopen_files = true;

	rc = cifs_tree_connect(0, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);

	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc) {
		/* If sess reconnected but tcon didn't, something strange ... */
		pr_warn_once("reconnect tcon failed rc = %d\n", rc);
		goto out;
	}

	if (smb2_command != SMB2_INTERNAL_CMD)
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);

	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		rc = -EAGAIN;
	}
failed:
	unload_nls(nls_codepage);
	return rc;
}

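/*
 * Zero the request buffer, assemble the SMB2 header and set StructureSize2
 * from the per-command table above; the fixed length of the request is
 * returned through *total_len.
 */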
static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server,
	       void *buf,
	       unsigned int *total_len)
{
	struct smb2_pdu *spdu = (struct smb2_pdu *)buf;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(buf, 0, 256);

	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
	spdu->StructureSize2 = cpu_to_le16(parmsize);

	*total_len = parmsize + sizeof(struct smb2_hdr);
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
				 struct TCP_Server_Info *server,
				 void **request_buf, unsigned int *total_len)
{
	/* BB eventually switch this to SMB2 specific small buf size */
	if (smb2_command == SMB2_SET_INFO)
		*request_buf = cifs_buf_get();
	else
		*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	fill_small_buf(smb2_command, tcon, server,
		       (struct smb2_hdr *)(*request_buf),
		       total_len);

	if (tcon != NULL) {
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return 0;
}

static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	int rc;

	rc = smb2_reconnect(smb2_command, tcon, server);
	if (rc)
		return rc;

	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
				     total_len);
}

static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
					     request_buf, total_len);
	}
	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
				   request_buf, total_len);
}

/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */

static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
	pneg_ctxt->DataLength =
		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
			  - sizeof(struct smb2_neg_context));
	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}

static unsigned int
build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
{
	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
	unsigned short num_algs = 1; /* number of signing algorithms sent */

	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
	/*
	 * Context Data length must be rounded to multiple of 8 for some servers
	 */
	pneg_ctxt->DataLength = cpu_to_le16(DIV_ROUND_UP(
				sizeof(struct smb2_signing_capabilities) -
				sizeof(struct smb2_neg_context) +
				(num_algs * 2 /* sizeof u16 */), 8) * 8);
	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);

	ctxt_len += 2 /* sizeof le16 */ * num_algs;
	ctxt_len = DIV_ROUND_UP(ctxt_len, 8) * 8;
	return ctxt_len;
	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
}

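/*
 * Advertise the ciphers we are willing to use, most preferred first:
 * only AES-256-GCM when require_gcm_256 is set; AES-128-GCM, AES-256-GCM
 * then AES-128-CCM when enable_gcm_256 is set; otherwise AES-128-GCM then
 * AES-128-CCM.
 */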
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	if (require_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
		pneg_ctxt->CipherCount = cpu_to_le16(1);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
	} else if (enable_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(3);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
	} else {
		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(2);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
	}
}

static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
{
	struct nls_table *cp = load_nls_default();

	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;

	/* copy up to max of first 100 bytes of server name to NetName field */
	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
	/* context size is DataLength + minimal smb2_neg_context */
	return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) +
			sizeof(struct smb2_neg_context), 8) * 8;
}

static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	pneg_ctxt->Name[0] = 0x93;
	pneg_ctxt->Name[1] = 0xAD;
	pneg_ctxt->Name[2] = 0x25;
	pneg_ctxt->Name[3] = 0x50;
	pneg_ctxt->Name[4] = 0x9C;
	pneg_ctxt->Name[5] = 0xB4;
	pneg_ctxt->Name[6] = 0x11;
	pneg_ctxt->Name[7] = 0xE7;
	pneg_ctxt->Name[8] = 0xB4;
	pneg_ctxt->Name[9] = 0x23;
	pneg_ctxt->Name[10] = 0x83;
	pneg_ctxt->Name[11] = 0xDE;
	pneg_ctxt->Name[12] = 0x96;
	pneg_ctxt->Name[13] = 0x8B;
	pneg_ctxt->Name[14] = 0xCD;
	pneg_ctxt->Name[15] = 0x7C;
}

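/*
 * Append the SMB3.1.1 negotiate contexts (preauth integrity, encryption,
 * netname and posix extensions, plus compression and signing when enabled)
 * after the fixed part of the negotiate request, each padded to an 8 byte
 * boundary, and update NegotiateContextOffset/Count and *total_len.
 */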
static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
		      struct TCP_Server_Info *server, unsigned int *total_len)
{
	char *pneg_ctxt;
	unsigned int ctxt_len, neg_context_count;

	if (*total_len > 200) {
		/* In case length corrupted don't want to overrun smb buffer */
		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
		return;
	}

	/*
	 * round up total_len of fixed part of SMB3 negotiate request to 8
	 * byte boundary before adding negotiate contexts
	 */
	*total_len = roundup(*total_len, 8);

	pneg_ctxt = (*total_len) + (char *)req;
	req->NegotiateContextOffset = cpu_to_le32(*total_len);

	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
				      server->hostname);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
	*total_len += sizeof(struct smb2_posix_neg_context);
	pneg_ctxt += sizeof(struct smb2_posix_neg_context);

	neg_context_count = 4;

	if (server->compress_algorithm) {
		build_compression_ctxt((struct smb2_compression_capabilities_context *)
				pneg_ctxt);
		ctxt_len = DIV_ROUND_UP(
			sizeof(struct smb2_compression_capabilities_context),
				8) * 8;
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	if (enable_negotiate_signing) {
		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
				pneg_ctxt);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	/* check for and add transport_capabilities and signing capabilities */
	req->NegotiateContextCount = cpu_to_le16(neg_context_count);

}

static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/* If invalid preauth context warn but use what we requested, SHA-512 */
	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad preauth context\n");
		return;
	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
		pr_warn_once("server sent invalid SaltLength\n");
		return;
	}
	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
		pr_warn_once("Invalid SMB3 hash algorithm count\n");
	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
		pr_warn_once("unknown SMB3 hash algorithm\n");
}

static void decode_compress_ctx(struct TCP_Server_Info *server,
			 struct smb2_compression_capabilities_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/* sizeof compress context is a one element compression capability struct */
	if (len < 10) {
		pr_warn_once("server sent bad compression cntxt\n");
		return;
	}
	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
		pr_warn_once("Invalid SMB3 compress algorithm count\n");
		return;
	}
	if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
		pr_warn_once("unknown compression algorithm\n");
		return;
	}
	server->compress_algorithm = ctxt->CompressionAlgorithms[0];
}

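/*
 * Parse the encryption context in the negotiate response and remember the
 * cipher the server selected.  A cipher value of zero (server chose none of
 * our ciphers) is tolerated and simply clears the encryption capability,
 * unless AES-256-GCM was required; a cipher we did not ask for is rejected.
 */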
static int decode_encrypt_ctx(struct TCP_Server_Info *server,
			      struct smb2_encryption_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad crypto ctxt len\n");
		return -EINVAL;
	}

	if (le16_to_cpu(ctxt->CipherCount) != 1) {
		pr_warn_once("Invalid SMB3.11 cipher count\n");
		return -EINVAL;
	}
	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
	if (require_gcm_256) {
		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
			return -EOPNOTSUPP;
		}
	} else if (ctxt->Ciphers[0] == 0) {
		/*
		 * e.g. if server only supported AES256_CCM (very unlikely)
		 * or server supported no encryption types or had all disabled.
		 * Since GLOBAL_CAP_ENCRYPTION will be not set, in the case
		 * in which mount requested encryption ("seal") checks later
		 * on during tree connection will return proper rc, but if
		 * seal not requested by client, since server is allowed to
		 * return 0 to indicate no supported cipher, we can't fail here
		 */
		server->cipher_type = 0;
		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
		pr_warn_once("Server does not support requested encryption types\n");
		return 0;
	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
		/* server returned a cipher we didn't ask for */
		pr_warn_once("Invalid SMB3.11 cipher returned\n");
		return -EINVAL;
	}
	server->cipher_type = ctxt->Ciphers[0];
	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
	return 0;
}

static void decode_signing_ctx(struct TCP_Server_Info *server,
			       struct smb2_signing_capabilities *pctxt)
{
	unsigned int len = le16_to_cpu(pctxt->DataLength);

	if ((len < 4) || (len > 16)) {
		pr_warn_once("server sent bad signing negcontext\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
		pr_warn_once("Invalid signing algorithm count\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
		pr_warn_once("unknown signing algorithm\n");
		return;
	}

	server->signing_negotiated = true;
	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
	cifs_dbg(FYI, "signing algorithm %d chosen\n",
		     server->signing_algorithm);
}


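/*
 * Walk the list of negotiate contexts returned by an SMB3.1.1 server and
 * dispatch each one (preauth integrity, encryption, compression, posix
 * extensions, signing) to its decoder; unknown context types are logged
 * and ignored.
 */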
static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
				     struct TCP_Server_Info *server,
				     unsigned int len_of_smb)
{
	struct smb2_neg_context *pctx;
	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
	unsigned int len_of_ctxts, i;
	int rc = 0;

	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
	if (len_of_smb <= offset) {
		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
		return -EINVAL;
	}

	len_of_ctxts = len_of_smb - offset;

	for (i = 0; i < ctxt_cnt; i++) {
		int clen;
		/* check that offset is not beyond end of SMB */
		if (len_of_ctxts == 0)
			break;

		if (len_of_ctxts < sizeof(struct smb2_neg_context))
			break;

		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
		clen = le16_to_cpu(pctx->DataLength);
		if (clen > len_of_ctxts)
			break;

		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
			decode_preauth_context(
				(struct smb2_preauth_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
			rc = decode_encrypt_ctx(server,
				(struct smb2_encryption_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
			decode_compress_ctx(server,
				(struct smb2_compression_capabilities_context *)pctx);
		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
			server->posix_ext_supported = true;
		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
			decode_signing_ctx(server,
				(struct smb2_signing_capabilities *)pctx);
		else
			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
				le16_to_cpu(pctx->ContextType));

		if (rc)
			break;
		/* offsets must be 8 byte aligned */
		clen = (clen + 7) & ~0x7;
		offset += clen + sizeof(struct smb2_neg_context);
		len_of_ctxts -= clen;
	}
	return rc;
}

static struct create_posix *
create_posix_buf(umode_t mode)
{
	struct create_posix *buf;

	buf = kzalloc(sizeof(struct create_posix),
		      GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_posix, Mode));
	buf->ccontext.DataLength = cpu_to_le32(4);
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_posix, Name));
	buf->ccontext.NameLength = cpu_to_le16(16);

	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	buf->Name[0] = 0x93;
	buf->Name[1] = 0xAD;
	buf->Name[2] = 0x25;
	buf->Name[3] = 0x50;
	buf->Name[4] = 0x9C;
	buf->Name[5] = 0xB4;
	buf->Name[6] = 0x11;
	buf->Name[7] = 0xE7;
	buf->Name[8] = 0xB4;
	buf->Name[9] = 0x23;
	buf->Name[10] = 0x83;
	buf->Name[11] = 0xDE;
	buf->Name[12] = 0x96;
	buf->Name[13] = 0x8B;
	buf->Name[14] = 0xCD;
	buf->Name[15] = 0x7C;
	buf->Mode = cpu_to_le32(mode);
	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
	return buf;
}

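/*
 * Append a POSIX create context to the create request's iov array and
 * update CreateContextsOffset/CreateContextsLength accordingly.
 */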
static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_posix_buf(mode);
	if (mode == ACL_NO_MODE)
		cifs_dbg(FYI, "Invalid mode\n");
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_posix);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset = cpu_to_le32(
				sizeof(struct smb2_create_req) +
				iov[num - 1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
	*num_iovec = num + 1;
	return 0;
}


/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb_rqst rqst;
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server = cifs_ses_server(ses);
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;
	unsigned int total_len;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(3);
		total_len += 6;
	} else if (strcmp(server->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(4);
		total_len += 8;
	} else {
		/* otherwise send specific dialect */
		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
		req->DialectCount = cpu_to_le16(1);
		total_len += 2;
	}

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
	if (ses->chan_max > 1)
		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else {
		memcpy(req->ClientGUID, server->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(server->vals->version_string,
		     SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(server->vals->version_string,
		     SMBDEFAULT_VERSION_STRING) == 0))
			assemble_neg_contexts(req, server, &total_len);
	}
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);
	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc == -EOPNOTSUPP) {
		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
		goto neg_exit;
	} else if (rc != 0)
		goto neg_exit;

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2.1 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			/* ops set to 3.0 by default for default so update */
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (strcmp(server->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			return -EIO;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			/* ops set to 3.0 by default for default so update */
			server->ops = &smb21_operations;
			server->vals = &smb21_values;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (le16_to_cpu(rsp->DialectRevision) !=
				server->vals->protocol_id) {
		/* if requested single dialect ensure returned dialect matched */
		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
				le16_to_cpu(rsp->DialectRevision));
		return -EIO;
	}

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
	else {
		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
				le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/*
	 * Keep a copy of the hash after negprot. This hash will be
	 * the starting hash value for all sessions made from this
	 * server.
	 */
	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
	       SMB2_PREAUTH_HASH_SIZE);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
				server->sec_mode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	/*
	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context
	 * Set the cipher type manually.
	 */
	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       (struct smb2_hdr *)rsp);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0) {
		cifs_dbg(FYI, "missing security blob on negprot\n");
		server->sec_ntlmssp = true;
	}

	rc = cifs_enable_signing(server, ses->sign);
	if (rc)
		goto neg_exit;
	if (blob_length) {
		rc = decode_negTokenInit(security_blob, blob_length, server);
		if (rc == 1)
			rc = 0;
		else if (rc == 0)
			rc = -EIO;
	}

	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
		if (rsp->NegotiateContextCount)
			rc = smb311_decode_neg_context(rsp, server,
						       rsp_iov.iov_len);
		else
			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
	}
neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

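/*
 * FSCTL_VALIDATE_NEGOTIATE_INFO: resend the negotiate capabilities, client
 * guid and dialects over a signed ioctl and check that the response matches
 * what the server claimed at negotiate time, which helps reduce attack
 * vectors such as dialect downgrade.  Skipped for SMB3.1.1, where preauth
 * integrity supersedes it.
 */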
Steve Frenchff1c0382013-11-19 23:44:46 -06001060int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1061{
Long Li2796d302018-04-25 11:30:04 -07001062 int rc;
1063 struct validate_negotiate_info_req *pneg_inbuf;
David Disseldorpfe83bebc2017-10-20 14:49:37 +02001064 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
Steve Frenchff1c0382013-11-19 23:44:46 -06001065 u32 rsplen;
Steve French9764c022017-09-17 10:41:35 -05001066 u32 inbuflen; /* max of 4 dialects */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001067 struct TCP_Server_Info *server = tcon->ses->server;
Steve Frenchff1c0382013-11-19 23:44:46 -06001068
1069 cifs_dbg(FYI, "validate negotiate\n");
1070
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001071 /* In SMB3.11 preauth integrity supersedes validate negotiate */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001072 if (server->dialect == SMB311_PROT_ID)
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001073 return 0;
1074
Steve Frenchff1c0382013-11-19 23:44:46 -06001075 /*
1076 * validation ioctl must be signed, so no point sending this if we
Steve French0603c962017-09-20 19:57:18 -05001077 * can not sign it (ie are not known user). Even if signing is not
1078 * required (enabled but not negotiated), in those cases we selectively
Steve Frenchff1c0382013-11-19 23:44:46 -06001079 * sign just this, the first and only signed request on a connection.
Steve French0603c962017-09-20 19:57:18 -05001080 * Having validation of negotiate info helps reduce attack vectors.
Steve Frenchff1c0382013-11-19 23:44:46 -06001081 */
Steve French0603c962017-09-20 19:57:18 -05001082 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
Steve Frenchff1c0382013-11-19 23:44:46 -06001083 return 0; /* validation requires signing */
1084
Steve French0603c962017-09-20 19:57:18 -05001085 if (tcon->ses->user_name == NULL) {
1086 cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1087 return 0; /* validation requires signing */
1088 }
1089
1090 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001091 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
Steve French0603c962017-09-20 19:57:18 -05001092
Long Li2796d302018-04-25 11:30:04 -07001093 pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1094 if (!pneg_inbuf)
1095 return -ENOMEM;
1096
1097 pneg_inbuf->Capabilities =
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001098 cpu_to_le32(server->vals->req_capabilities);
Steve French679971e2021-05-07 18:24:11 -05001099 if (tcon->ses->chan_max > 1)
1100 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1101
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001102 memcpy(pneg_inbuf->Guid, server->client_guid,
Sachin Prabhu39552ea2014-05-13 00:48:12 +01001103 SMB2_CLIENT_GUID_SIZE);
Steve Frenchff1c0382013-11-19 23:44:46 -06001104
1105 if (tcon->ses->sign)
Long Li2796d302018-04-25 11:30:04 -07001106 pneg_inbuf->SecurityMode =
Steve Frenchff1c0382013-11-19 23:44:46 -06001107 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1108 else if (global_secflags & CIFSSEC_MAY_SIGN)
Long Li2796d302018-04-25 11:30:04 -07001109 pneg_inbuf->SecurityMode =
Steve Frenchff1c0382013-11-19 23:44:46 -06001110 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1111 else
Long Li2796d302018-04-25 11:30:04 -07001112 pneg_inbuf->SecurityMode = 0;
Steve Frenchff1c0382013-11-19 23:44:46 -06001113
Steve French9764c022017-09-17 10:41:35 -05001114
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001115 if (strcmp(server->vals->version_string,
Steve French9764c022017-09-17 10:41:35 -05001116 SMB3ANY_VERSION_STRING) == 0) {
Long Li2796d302018-04-25 11:30:04 -07001117 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1118 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
Steve French6dffa4c2021-02-02 00:03:58 -06001119 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1120 pneg_inbuf->DialectCount = cpu_to_le16(3);
1121 /* SMB 2.1 not included so subtract one dialect from len */
Long Li2796d302018-04-25 11:30:04 -07001122 inbuflen = sizeof(*pneg_inbuf) -
Steve French6dffa4c2021-02-02 00:03:58 -06001123 (sizeof(pneg_inbuf->Dialects[0]));
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001124 } else if (strcmp(server->vals->version_string,
Steve French9764c022017-09-17 10:41:35 -05001125 SMBDEFAULT_VERSION_STRING) == 0) {
Long Li2796d302018-04-25 11:30:04 -07001126 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1127 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1128 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
Steve Frenchd5c70762019-01-03 02:37:21 -06001129 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1130 pneg_inbuf->DialectCount = cpu_to_le16(4);
Steve French6dffa4c2021-02-02 00:03:58 -06001131 /* structure is big enough for 4 dialects */
Long Li2796d302018-04-25 11:30:04 -07001132 inbuflen = sizeof(*pneg_inbuf);
Steve French9764c022017-09-17 10:41:35 -05001133 } else {
1134 /* otherwise specific dialect was requested */
Long Li2796d302018-04-25 11:30:04 -07001135 pneg_inbuf->Dialects[0] =
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001136 cpu_to_le16(server->vals->protocol_id);
Long Li2796d302018-04-25 11:30:04 -07001137 pneg_inbuf->DialectCount = cpu_to_le16(1);
Steve French9764c022017-09-17 10:41:35 -05001138 /* structure has room for 4 dialects, only 1 is sent here */
Long Li2796d302018-04-25 11:30:04 -07001139 inbuflen = sizeof(*pneg_inbuf) -
1140 sizeof(pneg_inbuf->Dialects[0]) * 2;
Steve French9764c022017-09-17 10:41:35 -05001141 }
Steve Frenchff1c0382013-11-19 23:44:46 -06001142
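 /*
 * Note: FSCTL_VALIDATE_NEGOTIATE_INFO echoes the client's negotiate
 * parameters back to the server; the signed response is compared below
 * against the dialect, security mode and capabilities saved at negotiate
 * time so that a downgraded or tampered negotiation can be detected.
 */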
1143 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1144 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001145 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1146 (char **)&pneg_rsp, &rsplen);
Namjae Jeon969ae8e2019-01-22 09:46:45 +09001147 if (rc == -EOPNOTSUPP) {
1148 /*
1149 * Old Windows versions or Netapp SMB server can return
1150 * not supported error. Client should accept it.
1151 */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001152 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
Colin Ian King21078202019-05-17 09:12:33 +01001153 rc = 0;
1154 goto out_free_inbuf;
Namjae Jeon969ae8e2019-01-22 09:46:45 +09001155 } else if (rc != 0) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001156 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1157 rc);
Long Li2796d302018-04-25 11:30:04 -07001158 rc = -EIO;
1159 goto out_free_inbuf;
Steve Frenchff1c0382013-11-19 23:44:46 -06001160 }
1161
Long Li2796d302018-04-25 11:30:04 -07001162 rc = -EIO;
1163 if (rsplen != sizeof(*pneg_rsp)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001164 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1165 rsplen);
Steve French7db0a6e2017-05-03 21:12:20 -05001166
1167 /* relax check since Mac returns max bufsize allowed on ioctl */
Long Li2796d302018-04-25 11:30:04 -07001168 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
1169 goto out_free_rsp;
Steve Frenchff1c0382013-11-19 23:44:46 -06001170 }
1171
1172 /* check validate negotiate info response matches what we got earlier */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001173 if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
Steve Frenchff1c0382013-11-19 23:44:46 -06001174 goto vneg_out;
1175
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001176 if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
Steve Frenchff1c0382013-11-19 23:44:46 -06001177 goto vneg_out;
1178
1179 /* do not validate server guid because not saved at negprot time yet */
1180
1181 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001182 SMB2_LARGE_FILES) != server->capabilities)
Steve Frenchff1c0382013-11-19 23:44:46 -06001183 goto vneg_out;
1184
1185 /* validate negotiate successful */
Long Li2796d302018-04-25 11:30:04 -07001186 rc = 0;
Steve Frenchff1c0382013-11-19 23:44:46 -06001187 cifs_dbg(FYI, "validate negotiate info successful\n");
Long Li2796d302018-04-25 11:30:04 -07001188 goto out_free_rsp;
Steve Frenchff1c0382013-11-19 23:44:46 -06001189
1190vneg_out:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001191 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
Long Li2796d302018-04-25 11:30:04 -07001192out_free_rsp:
David Disseldorpfe83bebc2017-10-20 14:49:37 +02001193 kfree(pneg_rsp);
Long Li2796d302018-04-25 11:30:04 -07001194out_free_inbuf:
1195 kfree(pneg_inbuf);
1196 return rc;
Steve Frenchff1c0382013-11-19 23:44:46 -06001197}
1198
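/*
 * Pick the authentication mechanism for SMB2/SMB3 session setup.  Kerberos
 * and raw NTLMSSP pass through unchanged, NTLMv2 is carried inside raw
 * NTLMSSP, and an unspecified type falls back to what the server advertised
 * (NTLMSSP preferred over Kerberos), subject to global_secflags.
 */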
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301199enum securityEnum
1200smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1201{
1202 switch (requested) {
1203 case Kerberos:
1204 case RawNTLMSSP:
1205 return requested;
1206 case NTLMv2:
1207 return RawNTLMSSP;
1208 case Unspecified:
1209 if (server->sec_ntlmssp &&
1210 (global_secflags & CIFSSEC_MAY_NTLMSSP))
1211 return RawNTLMSSP;
1212 if ((server->sec_kerberos || server->sec_mskerberos) &&
1213 (global_secflags & CIFSSEC_MAY_KRB5))
1214 return Kerberos;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001215 fallthrough;
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301216 default:
1217 return Unspecified;
1218 }
1219}
1220
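/*
 * SMB2/SMB3 session setup runs as a small state machine: SMB2_select_sec()
 * picks the first handler, each handler records its status in ->result and
 * sets ->func to the next step (or NULL when finished), and SMB2_sess_setup()
 * loops until ->func is cleared.
 */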
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001221struct SMB2_sess_data {
1222 unsigned int xid;
1223 struct cifs_ses *ses;
1224 struct nls_table *nls_cp;
1225 void (*func)(struct SMB2_sess_data *);
1226 int result;
1227 u64 previous_session;
1228
1229 /* we will send the SMB in three pieces:
1230 * a fixed length beginning part, an optional
1231 * SPNEGO blob (which can be zero length), and a
1232 * last part which will include the strings
1233 * and rest of bcc area. This allows us to avoid
1234 * a large 17K buffer allocation
1235 */
1236 int buf0_type;
1237 struct kvec iov[2];
1238};
1239
1240static int
1241SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1242{
1243 int rc;
1244 struct cifs_ses *ses = sess_data->ses;
1245 struct smb2_sess_setup_req *req;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001246 struct TCP_Server_Info *server = cifs_ses_server(ses);
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001247 unsigned int total_len;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001248
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001249 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1250 (void **) &req,
1251 &total_len);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001252 if (rc)
1253 return rc;
1254
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001255 if (sess_data->ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001256 req->hdr.SessionId = cpu_to_le64(sess_data->ses->Suid);
1257 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001258 req->PreviousSessionId = 0;
1259 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1260 } else {
1261 /* First session, not a reauthenticate */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001262 req->hdr.SessionId = 0;
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001263 /*
1264 * if reconnect, we need to send previous sess id
1265 * otherwise it is 0
1266 */
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10001267 req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001268 req->Flags = 0; /* MBZ */
1269 }
Steve Frenchd4090142018-06-13 17:05:58 -05001270
1271 /* enough to enable echoes and oplocks and one max size write */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001272 req->hdr.CreditRequest = cpu_to_le16(130);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001273
1274 /* only one of SMB2 signing flags may be set in SMB2 request */
1275 if (server->sign)
1276 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1277 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1278 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1279 else
1280 req->SecurityMode = 0;
1281
Steve French8d330962019-07-25 18:13:10 -05001282#ifdef CONFIG_CIFS_DFS_UPCALL
1283 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1284#else
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001285 req->Capabilities = 0;
Steve French8d330962019-07-25 18:13:10 -05001286#endif /* DFS_UPCALL */
1287
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001288 req->Channel = 0; /* MBZ */
1289
1290 sess_data->iov[0].iov_base = (char *)req;
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001291 /* 1 for pad */
1292 sess_data->iov[0].iov_len = total_len - 1;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001293 /*
1294 * This variable will be used to clear the buffer
1295 * allocated above in case of any error in the calling function.
1296 */
1297 sess_data->buf0_type = CIFS_SMALL_BUFFER;
1298
1299 return 0;
1300}
1301
1302static void
1303SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1304{
1305 free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
1306 sess_data->buf0_type = CIFS_NO_BUFFER;
1307}
1308
1309static int
1310SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1311{
1312 int rc;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001313 struct smb_rqst rqst;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001314 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001315 struct kvec rsp_iov = { NULL, 0 };
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001316
1317 /* Testing shows that buffer offset must be at location of Buffer[0] */
1318 req->SecurityBufferOffset =
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001319 cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001320 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1321
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001322 memset(&rqst, 0, sizeof(struct smb_rqst));
1323 rqst.rq_iov = sess_data->iov;
1324 rqst.rq_nvec = 2;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001325
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001326 /* BB add code to build os and lm fields */
1327 rc = cifs_send_recv(sess_data->xid, sess_data->ses,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001328 cifs_ses_server(sess_data->ses),
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001329 &rqst,
Ronnie Sahlberg88ea5cb2017-11-20 11:24:36 +11001330 &sess_data->buf0_type,
Shyam Prasad N0f56db82021-02-03 22:49:52 -08001331 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001332 cifs_small_buf_release(sess_data->iov[0].iov_base);
1333 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001334
1335 return rc;
1336}
1337
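/*
 * Finish establishing the session: derive the signing keys when the dialect
 * provides generate_signingkey, initialize the sequence number the first
 * time this socket is used, and (unless this is a channel binding) mark the
 * session CifsGood.
 */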
1338static int
1339SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1340{
1341 int rc = 0;
1342 struct cifs_ses *ses = sess_data->ses;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001343 struct TCP_Server_Info *server = cifs_ses_server(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001344
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001345 mutex_lock(&server->srv_mutex);
1346 if (server->ops->generate_signingkey) {
1347 rc = server->ops->generate_signingkey(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001348 if (rc) {
1349 cifs_dbg(FYI,
1350 "SMB3 session key generation failed\n");
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001351 mutex_unlock(&server->srv_mutex);
Pavel Shilovskycabfb362016-11-07 18:20:50 -08001352 return rc;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001353 }
1354 }
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001355 if (!server->session_estab) {
1356 server->sequence_number = 0x2;
1357 server->session_estab = true;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001358 }
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001359 mutex_unlock(&server->srv_mutex);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001360
1361 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001362 /* keep existing ses state if binding */
1363 if (!ses->binding) {
1364 spin_lock(&GlobalMid_Lock);
1365 ses->status = CifsGood;
1366 ses->need_reconnect = false;
1367 spin_unlock(&GlobalMid_Lock);
1368 }
1369
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001370 return rc;
1371}
1372
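/*
 * Kerberos session setup: the SPNEGO/krb5 blob comes from userspace via the
 * cifs.upcall helper (cifs_get_spnego_key), the upcall version is sanity
 * checked, the session key is saved unless this is a channel binding, and
 * the blob is sent as the security buffer of the SESSION_SETUP request.
 */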
1373#ifdef CONFIG_CIFS_UPCALL
1374static void
1375SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1376{
1377 int rc;
1378 struct cifs_ses *ses = sess_data->ses;
1379 struct cifs_spnego_msg *msg;
1380 struct key *spnego_key = NULL;
1381 struct smb2_sess_setup_rsp *rsp = NULL;
1382
1383 rc = SMB2_sess_alloc_buffer(sess_data);
1384 if (rc)
1385 goto out;
1386
1387 spnego_key = cifs_get_spnego_key(ses);
1388 if (IS_ERR(spnego_key)) {
1389 rc = PTR_ERR(spnego_key);
Steve French0a018942020-07-16 00:34:21 -05001390 if (rc == -ENOKEY)
1391 cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001392 spnego_key = NULL;
1393 goto out;
1394 }
1395
1396 msg = spnego_key->payload.data[0];
1397 /*
1398 * check version field to make sure that cifs.upcall is
1399 * sending us a response in an expected form
1400 */
1401 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001402 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1403 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001404 rc = -EKEYREJECTED;
1405 goto out_put_spnego_key;
1406 }
1407
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001408 /* keep session key if binding */
1409 if (!ses->binding) {
1410 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1411 GFP_KERNEL);
1412 if (!ses->auth_key.response) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001413 cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001414 msg->sesskey_len);
1415 rc = -ENOMEM;
1416 goto out_put_spnego_key;
1417 }
1418 ses->auth_key.len = msg->sesskey_len;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001419 }
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001420
1421 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1422 sess_data->iov[1].iov_len = msg->secblob_len;
1423
1424 rc = SMB2_sess_sendreceive(sess_data);
1425 if (rc)
1426 goto out_put_spnego_key;
1427
1428 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001429 /* keep session id and flags if binding */
1430 if (!ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001431 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001432 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1433 }
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001434
1435 rc = SMB2_sess_establish_session(sess_data);
1436out_put_spnego_key:
1437 key_invalidate(spnego_key);
1438 key_put(spnego_key);
1439out:
1440 sess_data->result = rc;
1441 sess_data->func = NULL;
1442 SMB2_sess_free_buffer(sess_data);
1443}
1444#else
1445static void
1446SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1447{
1448 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1449 sess_data->result = -EOPNOTSUPP;
1450 sess_data->func = NULL;
1451}
1452#endif
1453
Sachin Prabhu166cea42016-10-07 19:11:22 +01001454static void
1455SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
1456
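/*
 * Raw NTLMSSP round 1: send the NTLMSSP NEGOTIATE blob.  The server is
 * expected to answer with STATUS_MORE_PROCESSING_REQUIRED and a CHALLENGE
 * blob, which is decoded here before chaining to the authenticate step.
 */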
1457static void
1458SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1459{
1460 int rc;
1461 struct cifs_ses *ses = sess_data->ses;
1462 struct smb2_sess_setup_rsp *rsp = NULL;
Shyam Prasad N49bd49f2021-11-05 19:03:57 +00001463 unsigned char *ntlmssp_blob = NULL;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001464 bool use_spnego = false; /* else use raw ntlmssp */
1465 u16 blob_length = 0;
1466
1467 /*
1468 * If memory allocation is successful, caller of this function
1469 * frees it.
1470 */
1471 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1472 if (!ses->ntlmssp) {
1473 rc = -ENOMEM;
1474 goto out_err;
1475 }
1476 ses->ntlmssp->sesskey_per_smbsess = true;
1477
1478 rc = SMB2_sess_alloc_buffer(sess_data);
1479 if (rc)
1480 goto out_err;
1481
Shyam Prasad N49bd49f2021-11-05 19:03:57 +00001482 rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob,
1483 &blob_length, ses,
1484 sess_data->nls_cp);
1485 if (rc)
1486 goto out_err;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001487
Sachin Prabhu166cea42016-10-07 19:11:22 +01001488 if (use_spnego) {
1489 /* BB eventually need to add this */
1490 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1491 rc = -EOPNOTSUPP;
1492 goto out;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001493 }
1494 sess_data->iov[1].iov_base = ntlmssp_blob;
1495 sess_data->iov[1].iov_len = blob_length;
1496
1497 rc = SMB2_sess_sendreceive(sess_data);
1498 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1499
1500 /* If true, rc here is expected and not an error */
1501 if (sess_data->buf0_type != CIFS_NO_BUFFER &&
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001502 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
Sachin Prabhu166cea42016-10-07 19:11:22 +01001503 rc = 0;
1504
1505 if (rc)
1506 goto out;
1507
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10001508 if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
Sachin Prabhu166cea42016-10-07 19:11:22 +01001509 le16_to_cpu(rsp->SecurityBufferOffset)) {
1510 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
1511 le16_to_cpu(rsp->SecurityBufferOffset));
1512 rc = -EIO;
1513 goto out;
1514 }
1515 rc = decode_ntlmssp_challenge(rsp->Buffer,
1516 le16_to_cpu(rsp->SecurityBufferLength), ses);
1517 if (rc)
1518 goto out;
1519
1520 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1521
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001522 /* keep existing ses id and flags if binding */
1523 if (!ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001524 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001525 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1526 }
Sachin Prabhu166cea42016-10-07 19:11:22 +01001527
1528out:
1529 kfree(ntlmssp_blob);
1530 SMB2_sess_free_buffer(sess_data);
1531 if (!rc) {
1532 sess_data->result = 0;
1533 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1534 return;
1535 }
1536out_err:
1537 kfree(ses->ntlmssp);
1538 ses->ntlmssp = NULL;
1539 sess_data->result = rc;
1540 sess_data->func = NULL;
1541}
1542
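/*
 * Raw NTLMSSP round 2: build and send the AUTHENTICATE blob based on the
 * challenge decoded in the previous step, then finish establishing the
 * session (optionally dumping the session keys on debug builds).
 */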
1543static void
1544SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1545{
1546 int rc;
1547 struct cifs_ses *ses = sess_data->ses;
1548 struct smb2_sess_setup_req *req;
1549 struct smb2_sess_setup_rsp *rsp = NULL;
1550 unsigned char *ntlmssp_blob = NULL;
1551 bool use_spnego = false; /* else use raw ntlmssp */
1552 u16 blob_length = 0;
1553
1554 rc = SMB2_sess_alloc_buffer(sess_data);
1555 if (rc)
1556 goto out;
1557
1558 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001559 req->hdr.SessionId = cpu_to_le64(ses->Suid);
Sachin Prabhu166cea42016-10-07 19:11:22 +01001560
1561 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
1562 sess_data->nls_cp);
1563 if (rc) {
1564 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1565 goto out;
1566 }
1567
1568 if (use_spnego) {
1569 /* BB eventually need to add this */
1570 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1571 rc = -EOPNOTSUPP;
1572 goto out;
1573 }
1574 sess_data->iov[1].iov_base = ntlmssp_blob;
1575 sess_data->iov[1].iov_len = blob_length;
1576
1577 rc = SMB2_sess_sendreceive(sess_data);
1578 if (rc)
1579 goto out;
1580
1581 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1582
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001583 /* keep existing ses id and flags if binding */
1584 if (!ses->binding) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001585 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001586 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1587 }
Sachin Prabhu166cea42016-10-07 19:11:22 +01001588
1589 rc = SMB2_sess_establish_session(sess_data);
Ronnie Sahlbergf560cda2020-04-12 16:09:26 +10001590#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1591 if (ses->server->dialect < SMB30_PROT_ID) {
1592 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1593 /*
1594 * The session id is opaque in terms of endianness, so we can't
1595 * print it as a long long. We dump it as we got it on the wire
1596 */
1597 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
1598 &ses->Suid);
1599 cifs_dbg(VFS, "Session Key %*ph\n",
1600 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1601 cifs_dbg(VFS, "Signing Key %*ph\n",
1602 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1603 }
1604#endif
Sachin Prabhu166cea42016-10-07 19:11:22 +01001605out:
1606 kfree(ntlmssp_blob);
1607 SMB2_sess_free_buffer(sess_data);
1608 kfree(ses->ntlmssp);
1609 ses->ntlmssp = NULL;
1610 sess_data->result = rc;
1611 sess_data->func = NULL;
1612}
1613
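/*
 * Map the selected security type to the first handler of the session setup
 * state machine above.
 */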
1614static int
1615SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
1616{
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301617 int type;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001618
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001619 type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301620 cifs_dbg(FYI, "sess setup type %d\n", type);
1621 if (type == Unspecified) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001622 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301623 return -EINVAL;
1624 }
1625
1626 switch (type) {
Sachin Prabhu166cea42016-10-07 19:11:22 +01001627 case Kerberos:
1628 sess_data->func = SMB2_auth_kerberos;
1629 break;
1630 case RawNTLMSSP:
1631 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1632 break;
1633 default:
Sachin Prabhuef65aae2017-01-18 15:35:57 +05301634 cifs_dbg(VFS, "secType %d not supported!\n", type);
Sachin Prabhu166cea42016-10-07 19:11:22 +01001635 return -EOPNOTSUPP;
1636 }
1637
1638 return 0;
1639}
1640
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001641int
1642SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1643 const struct nls_table *nls_cp)
1644{
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001645 int rc = 0;
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001646 struct TCP_Server_Info *server = cifs_ses_server(ses);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001647 struct SMB2_sess_data *sess_data;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001648
Joe Perchesf96637b2013-05-04 22:12:25 -05001649 cifs_dbg(FYI, "Session Setup\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001650
Jeff Layton3534b852013-05-24 07:41:01 -04001651 if (!server) {
1652 WARN(1, "%s: server is NULL!\n", __func__);
1653 return -EIO;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001654 }
1655
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001656 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1657 if (!sess_data)
1658 return -ENOMEM;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001659
1660 rc = SMB2_select_sec(ses, sess_data);
1661 if (rc)
1662 goto out;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001663 sess_data->xid = xid;
1664 sess_data->ses = ses;
1665 sess_data->buf0_type = CIFS_NO_BUFFER;
1666 sess_data->nls_cp = (struct nls_table *) nls_cp;
Steve Frenchb2adf22f2018-05-31 15:19:25 -05001667 sess_data->previous_session = ses->Suid;
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001668
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001669 /*
1670 * Initialize the session hash with the server one.
1671 */
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +02001672 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001673 SMB2_PREAUTH_HASH_SIZE);
Aurelien Aptel8bd68c62018-02-16 19:19:29 +01001674
Sachin Prabhu166cea42016-10-07 19:11:22 +01001675 while (sess_data->func)
1676 sess_data->func(sess_data);
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001677
Steve Frenchc721c382017-09-19 18:40:03 -05001678 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001679 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001680 rc = sess_data->result;
Sachin Prabhu166cea42016-10-07 19:11:22 +01001681out:
Sachin Prabhu3baf1a72016-10-07 19:11:21 +01001682 kfree(sess_data);
1683 return rc;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001684}
1685
1686int
1687SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
1688{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001689 struct smb_rqst rqst;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001690 struct smb2_logoff_req *req; /* response is also trivial struct */
1691 int rc = 0;
1692 struct TCP_Server_Info *server;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001693 int flags = 0;
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001694 unsigned int total_len;
1695 struct kvec iov[1];
1696 struct kvec rsp_iov;
1697 int resp_buf_type;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001698
Joe Perchesf96637b2013-05-04 22:12:25 -05001699 cifs_dbg(FYI, "disconnect session %p\n", ses);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001700
1701 if (ses && (ses->server))
1702 server = ses->server;
1703 else
1704 return -EIO;
1705
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -05001706 /* no need to send SMB logoff if uid already closed due to reconnect */
1707 if (ses->need_reconnect)
1708 goto smb2_session_already_dead;
1709
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001710 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
1711 (void **) &req, &total_len);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001712 if (rc)
1713 return rc;
1714
1715 /* since no tcon, smb2_init cannot do this, so do it here */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001716 req->hdr.SessionId = cpu_to_le64(ses->Suid);
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001717
1718 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
1719 flags |= CIFS_TRANSFORM_REQ;
1720 else if (server->sign)
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001721 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001722
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10001723 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg45305ed2017-11-09 12:14:17 +11001724
1725 iov[0].iov_base = (char *)req;
1726 iov[0].iov_len = total_len;
1727
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001728 memset(&rqst, 0, sizeof(struct smb_rqst));
1729 rqst.rq_iov = iov;
1730 rqst.rq_nvec = 1;
1731
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001732 rc = cifs_send_recv(xid, ses, ses->server,
1733 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001734 cifs_small_buf_release(req);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001735 /*
1736 * No tcon so can't do
1737 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1738 */
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -05001739
1740smb2_session_already_dead:
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04001741 return rc;
1742}
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001743
1744static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
1745{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001746 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001747}
1748
1749#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
1750
Steve Frenchde9f68df2013-11-15 11:26:24 -06001751/* These are similar values to what Windows uses */
1752static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
1753{
1754 tcon->max_chunks = 256;
1755 tcon->max_bytes_chunk = 1048576;
1756 tcon->max_bytes_copy = 16777216;
1757}
1758
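/*
 * Send an SMB2/SMB3 TREE_CONNECT for the UNC path in "tree": the share name
 * is converted to UTF-16, the request is signed for SMB3.1.1 when the
 * session is neither encrypted nor guest/anonymous, the returned share type,
 * flags and capabilities are cached in the tcon, and validate_negotiate is
 * run when the dialect provides it.
 */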
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001759int
1760SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1761 struct cifs_tcon *tcon, const struct nls_table *cp)
1762{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001763 struct smb_rqst rqst;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001764 struct smb2_tree_connect_req *req;
1765 struct smb2_tree_connect_rsp *rsp = NULL;
1766 struct kvec iov[2];
Aurélien Apteldb3b5472017-10-11 13:23:36 +02001767 struct kvec rsp_iov = { NULL, 0 };
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001768 int rc = 0;
1769 int resp_buftype;
1770 int unc_path_len;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001771 __le16 *unc_path = NULL;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001772 int flags = 0;
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001773 unsigned int total_len;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001774 struct TCP_Server_Info *server;
1775
1776 /* always use master channel */
1777 server = ses->server;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001778
Joe Perchesf96637b2013-05-04 22:12:25 -05001779 cifs_dbg(FYI, "TCON\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001780
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001781 if (!server || !tree)
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001782 return -EIO;
1783
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001784 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
1785 if (unc_path == NULL)
1786 return -ENOMEM;
1787
1788 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
1789 unc_path_len *= 2;
1790 if (unc_path_len < 2) {
1791 kfree(unc_path);
1792 return -EINVAL;
1793 }
1794
Jan-Marek Glogowski806a28e2017-02-20 12:25:58 +01001795 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
Aurelien Aptelb327a712018-01-24 13:46:10 +01001796 tcon->tid = 0;
Steve Frenchfae80442018-10-19 17:14:32 -05001797 atomic_set(&tcon->num_remote_opens, 0);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001798 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
1799 (void **) &req, &total_len);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001800 if (rc) {
1801 kfree(unc_path);
1802 return rc;
1803 }
1804
Steve French5a77e752018-05-09 17:43:08 -05001805 if (smb3_encryption_required(tcon))
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001806 flags |= CIFS_TRANSFORM_REQ;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001807
1808 iov[0].iov_base = (char *)req;
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001809 /* 1 for pad */
1810 iov[0].iov_len = total_len - 1;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001811
1812 /* Testing shows that buffer offset must be at location of Buffer[0] */
1813 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
Ronnie Sahlberg661bb9432017-11-09 12:14:23 +11001814 - 1 /* pad */);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001815 req->PathLength = cpu_to_le16(unc_path_len - 2);
1816 iov[1].iov_base = unc_path;
1817 iov[1].iov_len = unc_path_len;
1818
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001819 /*
1820 * 3.11 tcon req must be signed if not encrypted (see MS-SMB2 3.2.4.1.1),
1821 * unless it is a guest or anonymous user (see MS-SMB2 3.2.5.3.1).
Steve French8c11a602019-03-22 22:31:17 -05001822 * (Samba servers don't always set the flag so also check if null user)
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001823 */
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001824 if ((server->dialect == SMB311_PROT_ID) &&
Ronnie Sahlberge71ab2a2019-03-21 14:59:02 +10001825 !smb3_encryption_required(tcon) &&
Steve French8c11a602019-03-22 22:31:17 -05001826 !(ses->session_flags &
1827 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
1828 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001829 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Steve French6188f282018-03-13 02:29:36 -05001830
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001831 memset(&rqst, 0, sizeof(struct smb_rqst));
1832 rqst.rq_iov = iov;
1833 rqst.rq_nvec = 2;
1834
Steve French4fe75c42019-02-14 01:19:02 -06001835 /* Need 64 credits for a max size write, so ask for more in case we don't have them yet */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001836 req->hdr.CreditRequest = cpu_to_le16(64);
Steve French4fe75c42019-02-14 01:19:02 -06001837
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001838 rc = cifs_send_recv(xid, ses, server,
1839 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001840 cifs_small_buf_release(req);
1841 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
Steve Frenchf8af49d2018-10-28 00:47:11 -05001842 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
Steve Frenchbac35392021-11-11 16:18:14 -06001843 if ((rc != 0) || (rsp == NULL)) {
Steve French35591342021-06-19 12:01:37 -05001844 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
1845 tcon->need_reconnect = true;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001846 goto tcon_error_exit;
1847 }
1848
Christophe JAILLETcd123002017-05-12 17:59:32 +02001849 switch (rsp->ShareType) {
1850 case SMB2_SHARE_TYPE_DISK:
Joe Perchesf96637b2013-05-04 22:12:25 -05001851 cifs_dbg(FYI, "connection to disk share\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001852 break;
1853 case SMB2_SHARE_TYPE_PIPE:
Aurelien Aptelb327a712018-01-24 13:46:10 +01001854 tcon->pipe = true;
Joe Perchesf96637b2013-05-04 22:12:25 -05001855 cifs_dbg(FYI, "connection to pipe share\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001856 break;
1857 case SMB2_SHARE_TYPE_PRINT:
Aurelien Aptelb327a712018-01-24 13:46:10 +01001858 tcon->print = true;
Joe Perchesf96637b2013-05-04 22:12:25 -05001859 cifs_dbg(FYI, "connection to printer\n");
Christophe JAILLETcd123002017-05-12 17:59:32 +02001860 break;
1861 default:
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001862 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001863 rc = -EOPNOTSUPP;
1864 goto tcon_error_exit;
1865 }
1866
1867 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
Steve French769ee6a2013-06-19 14:15:30 -05001868 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001869 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
1870 tcon->tidStatus = CifsGood;
1871 tcon->need_reconnect = false;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001872 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
Zhao Hongjiang46b51d02013-06-24 01:57:47 -05001873 strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001874
1875 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
1876 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001877 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001878
1879 if (tcon->seal &&
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001880 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001881 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
Pavel Shilovskyae6f8dd2016-11-17 13:59:23 -08001882
Steve Frenchde9f68df2013-11-15 11:26:24 -06001883 init_copy_chunk_defaults(tcon);
Ronnie Sahlbergafe6f652019-08-28 17:15:35 +10001884 if (server->ops->validate_negotiate)
1885 rc = server->ops->validate_negotiate(xid, tcon);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001886tcon_exit:
Steve Frenchf8af49d2018-10-28 00:47:11 -05001887
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001888 free_rsp_buf(resp_buftype, rsp);
1889 kfree(unc_path);
1890 return rc;
1891
1892tcon_error_exit:
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09001893 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001894 cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001895 goto tcon_exit;
1896}
1897
1898int
1899SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
1900{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001901 struct smb_rqst rqst;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001902 struct smb2_tree_disconnect_req *req; /* response is trivial */
1903 int rc = 0;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001904 struct cifs_ses *ses = tcon->ses;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001905 int flags = 0;
Ronnie Sahlberg4eecf4c2017-11-09 12:14:18 +11001906 unsigned int total_len;
1907 struct kvec iov[1];
1908 struct kvec rsp_iov;
1909 int resp_buf_type;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001910
Joe Perchesf96637b2013-05-04 22:12:25 -05001911 cifs_dbg(FYI, "Tree Disconnect\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001912
Christos Gkekas68a6afa2017-07-09 11:45:04 +01001913 if (!ses || !(ses->server))
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001914 return -EIO;
1915
1916 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
1917 return 0;
1918
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +10001919 close_cached_dir_lease(&tcon->crfid);
Ronnie Sahlberg72e73c72019-11-07 17:00:38 +10001920
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001921 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
1922 (void **) &req,
1923 &total_len);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001924 if (rc)
1925 return rc;
1926
Steve French5a77e752018-05-09 17:43:08 -05001927 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07001928 flags |= CIFS_TRANSFORM_REQ;
1929
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10001930 flags |= CIFS_NO_RSP_BUF;
Ronnie Sahlberg4eecf4c2017-11-09 12:14:18 +11001931
1932 iov[0].iov_base = (char *)req;
1933 iov[0].iov_len = total_len;
1934
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10001935 memset(&rqst, 0, sizeof(struct smb_rqst));
1936 rqst.rq_iov = iov;
1937 rqst.rq_nvec = 1;
1938
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001939 rc = cifs_send_recv(xid, ses, ses->server,
1940 &rqst, &resp_buf_type, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07001941 cifs_small_buf_release(req);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04001942 if (rc)
1943 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
1944
1945 return rc;
1946}
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001947
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001948
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001949static struct create_durable *
1950create_durable_buf(void)
1951{
1952 struct create_durable *buf;
1953
1954 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1955 if (!buf)
1956 return NULL;
1957
1958 buf->ccontext.DataOffset = cpu_to_le16(offsetof
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001959 (struct create_durable, Data));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001960 buf->ccontext.DataLength = cpu_to_le32(16);
1961 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1962 (struct create_durable, Name));
1963 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07001964 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001965 buf->Name[0] = 'D';
1966 buf->Name[1] = 'H';
1967 buf->Name[2] = 'n';
1968 buf->Name[3] = 'Q';
1969 return buf;
1970}
1971
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001972static struct create_durable *
1973create_reconnect_durable_buf(struct cifs_fid *fid)
1974{
1975 struct create_durable *buf;
1976
1977 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1978 if (!buf)
1979 return NULL;
1980
1981 buf->ccontext.DataOffset = cpu_to_le16(offsetof
1982 (struct create_durable, Data));
1983 buf->ccontext.DataLength = cpu_to_le32(16);
1984 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1985 (struct create_durable, Name));
1986 buf->ccontext.NameLength = cpu_to_le16(4);
1987 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
1988 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
Steve French12197a72014-05-14 05:29:40 -07001989 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001990 buf->Name[0] = 'D';
1991 buf->Name[1] = 'H';
1992 buf->Name[2] = 'n';
1993 buf->Name[3] = 'C';
1994 return buf;
1995}
1996
Steve French89a5bfa2019-07-18 17:22:18 -05001997static void
1998parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
1999{
2000 struct create_on_disk_id *pdisk_id = (struct create_on_disk_id *)cc;
2001
2002 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
2003 pdisk_id->DiskFileId, pdisk_id->VolumeId);
2004 buf->IndexNumber = pdisk_id->DiskFileId;
2005}
2006
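/*
 * Parse the SMB3.1.1 POSIX create context response payload: 4 byte nlink,
 * 4 byte reparse tag and 4 byte mode, followed by variable length owner and
 * group SIDs (sizes validated with posix_info_sid_size()).
 */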
Steve Frenchab3459d2020-02-06 17:31:56 -06002007static void
Aurelien Aptel69dda302020-03-02 17:53:22 +01002008parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
2009 struct create_posix_rsp *posix)
Steve Frenchab3459d2020-02-06 17:31:56 -06002010{
Aurelien Aptel69dda302020-03-02 17:53:22 +01002011 int sid_len;
2012 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
2013 u8 *end = beg + le32_to_cpu(cc->DataLength);
2014 u8 *sid;
Steve Frenchab3459d2020-02-06 17:31:56 -06002015
Aurelien Aptel69dda302020-03-02 17:53:22 +01002016 memset(posix, 0, sizeof(*posix));
Aurelien Aptel2e8af972020-02-08 15:50:56 +01002017
Aurelien Aptel69dda302020-03-02 17:53:22 +01002018 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
2019 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
2020 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
2021
2022 sid = beg + 12;
2023 sid_len = posix_info_sid_size(sid, end);
2024 if (sid_len < 0) {
2025 cifs_dbg(VFS, "bad owner sid in posix create response\n");
2026 return;
2027 }
2028 memcpy(&posix->owner, sid, sid_len);
2029
2030 sid = sid + sid_len;
2031 sid_len = posix_info_sid_size(sid, end);
2032 if (sid_len < 0) {
2033 cifs_dbg(VFS, "bad group sid in posix create response\n");
2034 return;
2035 }
2036 memcpy(&posix->group, sid, sid_len);
2037
2038 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
2039 posix->nlink, posix->mode, posix->reparse_tag);
Steve Frenchab3459d2020-02-06 17:31:56 -06002040}
2041
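/*
 * Walk the create contexts returned in an SMB2 CREATE response, chained via
 * their Next offsets.  The lease, query-on-disk-id and SMB3.1.1 POSIX
 * contexts are recognized; anything else is skipped.  When the response
 * OplockLevel is not SMB2_OPLOCK_LEVEL_LEASE it is returned directly in
 * *oplock.
 */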
Steve French89a5bfa2019-07-18 17:22:18 -05002042void
2043smb2_parse_contexts(struct TCP_Server_Info *server,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002044 struct smb2_create_rsp *rsp,
2045 unsigned int *epoch, char *lease_key, __u8 *oplock,
2046 struct smb2_file_all_info *buf,
2047 struct create_posix_rsp *posix)
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002048{
2049 char *data_offset;
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002050 struct create_context *cc;
Justin Maggarddeb7def2016-02-09 15:52:08 -08002051 unsigned int next;
2052 unsigned int remaining;
Pavel Shilovskyfd554392013-07-09 19:44:56 +04002053 char *name;
Colin Ian King3ece60e2020-10-20 15:19:36 +01002054 static const char smb3_create_tag_posix[] = {
2055 0x93, 0xAD, 0x25, 0x50, 0x9C,
2056 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
2057 0xDE, 0x96, 0x8B, 0xCD, 0x7C
2058 };
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002059
Steve French89a5bfa2019-07-18 17:22:18 -05002060 *oplock = 0;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002061 data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
Justin Maggarddeb7def2016-02-09 15:52:08 -08002062 remaining = le32_to_cpu(rsp->CreateContextsLength);
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002063 cc = (struct create_context *)data_offset;
Steve French89a5bfa2019-07-18 17:22:18 -05002064
2065 /* Initialize inode number to 0 in case no valid data in qfid context */
2066 if (buf)
2067 buf->IndexNumber = 0;
2068
Justin Maggarddeb7def2016-02-09 15:52:08 -08002069 while (remaining >= sizeof(struct create_context)) {
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002070 name = le16_to_cpu(cc->NameOffset) + (char *)cc;
Justin Maggarddeb7def2016-02-09 15:52:08 -08002071 if (le16_to_cpu(cc->NameLength) == 4 &&
Steve French89a5bfa2019-07-18 17:22:18 -05002072 strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
2073 *oplock = server->ops->parse_lease_buf(cc, epoch,
2074 lease_key);
2075 else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
2076 strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
2077 parse_query_id_ctxt(cc, buf);
Steve Frenchab3459d2020-02-06 17:31:56 -06002078 else if ((le16_to_cpu(cc->NameLength) == 16)) {
Aurelien Aptel69dda302020-03-02 17:53:22 +01002079 if (posix &&
2080 memcmp(name, smb3_create_tag_posix, 16) == 0)
2081 parse_posix_ctxt(cc, buf, posix);
Steve Frenchab3459d2020-02-06 17:31:56 -06002082 }
2083 /* else {
2084 cifs_dbg(FYI, "Context not matched with len %d\n",
2085 le16_to_cpu(cc->NameLength));
2086 cifs_dump_mem("Cctxt name: ", name, 4);
2087 } */
Justin Maggarddeb7def2016-02-09 15:52:08 -08002088
2089 next = le32_to_cpu(cc->Next);
2090 if (!next)
2091 break;
2092 remaining -= next;
2093 cc = (struct create_context *)((char *)cc + next);
2094 }
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002095
Steve French89a5bfa2019-07-18 17:22:18 -05002096 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2097 *oplock = rsp->OplockLevel;
2098
2099 return;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002100}
2101
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002102static int
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002103add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
Stefano Brivio729c0c92018-07-05 15:10:02 +02002104 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002105{
2106 struct smb2_create_req *req = iov[0].iov_base;
2107 unsigned int num = *num_iovec;
2108
Stefano Brivio729c0c92018-07-05 15:10:02 +02002109 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002110 if (iov[num].iov_base == NULL)
2111 return -ENOMEM;
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002112 iov[num].iov_len = server->vals->create_lease_size;
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002113 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2114 if (!req->CreateContextsOffset)
2115 req->CreateContextsOffset = cpu_to_le32(
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002116 sizeof(struct smb2_create_req) +
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002117 iov[num - 1].iov_len);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002118 le32_add_cpu(&req->CreateContextsLength,
2119 server->vals->create_lease_size);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002120 *num_iovec = num + 1;
2121 return 0;
2122}
2123
Steve Frenchb56eae42015-11-03 09:26:27 -06002124static struct create_durable_v2 *
Steve Frenchca567eb2019-03-29 16:31:07 -05002125create_durable_v2_buf(struct cifs_open_parms *oparms)
Steve Frenchb56eae42015-11-03 09:26:27 -06002126{
Steve Frenchca567eb2019-03-29 16:31:07 -05002127 struct cifs_fid *pfid = oparms->fid;
Steve Frenchb56eae42015-11-03 09:26:27 -06002128 struct create_durable_v2 *buf;
2129
2130 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
2131 if (!buf)
2132 return NULL;
2133
2134 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2135 (struct create_durable_v2, dcontext));
2136 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
2137 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2138 (struct create_durable_v2, Name));
2139 buf->ccontext.NameLength = cpu_to_le16(4);
2140
Steve Frenchca567eb2019-03-29 16:31:07 -05002141 /*
2142 * NB: Handle timeout defaults to 0, which allows server to choose
2143 * (most servers default to 120 seconds) and most clients default to 0.
2144 * This can be overridden at mount ("handletimeout=") if the user wants
2145 * a different persistent (or resilient) handle timeout for all opens
2146 * on a particular SMB3 mount.
2147 */
2148 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
Steve Frenchb56eae42015-11-03 09:26:27 -06002149 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
Steve Frenchfa70b872016-09-22 00:39:34 -05002150 generate_random_uuid(buf->dcontext.CreateGuid);
Steve Frenchb56eae42015-11-03 09:26:27 -06002151 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2152
2153 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
2154 buf->Name[0] = 'D';
2155 buf->Name[1] = 'H';
2156 buf->Name[2] = '2';
2157 buf->Name[3] = 'Q';
2158 return buf;
2159}
2160
2161static struct create_durable_handle_reconnect_v2 *
2162create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2163{
2164 struct create_durable_handle_reconnect_v2 *buf;
2165
2166 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2167 GFP_KERNEL);
2168 if (!buf)
2169 return NULL;
2170
2171 buf->ccontext.DataOffset =
2172 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2173 dcontext));
2174 buf->ccontext.DataLength =
2175 cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2176 buf->ccontext.NameOffset =
2177 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2178 Name));
2179 buf->ccontext.NameLength = cpu_to_le16(4);
2180
2181 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2182 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2183 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2184 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2185
2186 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2187 buf->Name[0] = 'D';
2188 buf->Name[1] = 'H';
2189 buf->Name[2] = '2';
2190 buf->Name[3] = 'C';
2191 return buf;
2192}
2193
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002194static int
Steve Frenchb56eae42015-11-03 09:26:27 -06002195add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002196 struct cifs_open_parms *oparms)
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002197{
2198 struct smb2_create_req *req = iov[0].iov_base;
2199 unsigned int num = *num_iovec;
2200
Steve Frenchca567eb2019-03-29 16:31:07 -05002201 iov[num].iov_base = create_durable_v2_buf(oparms);
Steve Frenchb56eae42015-11-03 09:26:27 -06002202 if (iov[num].iov_base == NULL)
2203 return -ENOMEM;
2204 iov[num].iov_len = sizeof(struct create_durable_v2);
2205 if (!req->CreateContextsOffset)
2206 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002207 cpu_to_le32(sizeof(struct smb2_create_req) +
Steve Frenchb56eae42015-11-03 09:26:27 -06002208 iov[1].iov_len);
2209 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
Steve Frenchb56eae42015-11-03 09:26:27 -06002210 *num_iovec = num + 1;
2211 return 0;
2212}
2213
2214static int
2215add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2216 struct cifs_open_parms *oparms)
2217{
2218 struct smb2_create_req *req = iov[0].iov_base;
2219 unsigned int num = *num_iovec;
2220
2221 /* indicate that we don't need to relock the file */
2222 oparms->reconnect = false;
2223
2224 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2225 if (iov[num].iov_base == NULL)
2226 return -ENOMEM;
2227 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2228 if (!req->CreateContextsOffset)
2229 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002230 cpu_to_le32(sizeof(struct smb2_create_req) +
Steve Frenchb56eae42015-11-03 09:26:27 -06002231 iov[1].iov_len);
2232 le32_add_cpu(&req->CreateContextsLength,
2233 sizeof(struct create_durable_handle_reconnect_v2));
Steve Frenchb56eae42015-11-03 09:26:27 -06002234 *num_iovec = num + 1;
2235 return 0;
2236}
2237
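/*
 * Add a durable handle context to the create request: a persistent (v2)
 * handle or its reconnect variant when use_persistent is set, otherwise a
 * plain durable (v1) request or reconnect context.
 */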
2238static int
2239add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2240 struct cifs_open_parms *oparms, bool use_persistent)
2241{
2242 struct smb2_create_req *req = iov[0].iov_base;
2243 unsigned int num = *num_iovec;
2244
2245 if (use_persistent) {
2246 if (oparms->reconnect)
2247 return add_durable_reconnect_v2_context(iov, num_iovec,
2248 oparms);
2249 else
2250 return add_durable_v2_context(iov, num_iovec, oparms);
2251 }
2252
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002253 if (oparms->reconnect) {
2254 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2255 /* indicate that we don't need to relock the file */
2256 oparms->reconnect = false;
2257 } else
2258 iov[num].iov_base = create_durable_buf();
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002259 if (iov[num].iov_base == NULL)
2260 return -ENOMEM;
2261 iov[num].iov_len = sizeof(struct create_durable);
2262 if (!req->CreateContextsOffset)
2263 req->CreateContextsOffset =
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002264 cpu_to_le32(sizeof(struct smb2_create_req) +
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002265 iov[1].iov_len);
Wei Yongjun31f92e92013-08-26 14:34:46 +08002266 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002267 *num_iovec = num + 1;
2268 return 0;
2269}
2270
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002271/* See MS-SMB2 2.2.13.2.7 */
2272static struct crt_twarp_ctxt *
2273create_twarp_buf(__u64 timewarp)
2274{
2275 struct crt_twarp_ctxt *buf;
2276
2277 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2278 if (!buf)
2279 return NULL;
2280
2281 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2282 (struct crt_twarp_ctxt, Timestamp));
2283 buf->ccontext.DataLength = cpu_to_le32(8);
2284 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2285 (struct crt_twarp_ctxt, Name));
2286 buf->ccontext.NameLength = cpu_to_le16(4);
2287 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2288 buf->Name[0] = 'T';
2289 buf->Name[1] = 'W';
2290 buf->Name[2] = 'r';
2291 buf->Name[3] = 'p';
2292 buf->Timestamp = cpu_to_le64(timewarp);
2293 return buf;
2294}
2295
2296/* See MS-SMB2 2.2.13.2.7 */
2297static int
2298add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2299{
2300 struct smb2_create_req *req = iov[0].iov_base;
2301 unsigned int num = *num_iovec;
2302
2303 iov[num].iov_base = create_twarp_buf(timewarp);
2304 if (iov[num].iov_base == NULL)
2305 return -ENOMEM;
2306 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2307 if (!req->CreateContextsOffset)
2308 req->CreateContextsOffset = cpu_to_le32(
2309 sizeof(struct smb2_create_req) +
2310 iov[num - 1].iov_len);
2311 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
2312 *num_iovec = num + 1;
2313 return 0;
2314}
2315
Steve French975221e2020-06-12 09:25:21 -05002316/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
2317static void setup_owner_group_sids(char *buf)
2318{
2319 struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2320
2321 /* Populate the user ownership fields S-1-5-88-1 */
2322 sids->owner.Revision = 1;
2323 sids->owner.NumAuth = 3;
2324 sids->owner.Authority[5] = 5;
2325 sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2326 sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2327 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2328
2329 /* Populate the group ownership fields S-1-5-88-2 */
2330 sids->group.Revision = 1;
2331 sids->group.NumAuth = 3;
2332 sids->group.Authority[5] = 5;
2333 sids->group.SubAuthorities[0] = cpu_to_le32(88);
2334 sids->group.SubAuthorities[1] = cpu_to_le32(2);
2335 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
Steve Frencha7a519a2020-06-12 14:49:47 -05002336
2337 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
Steve French975221e2020-06-12 09:25:21 -05002338}
2339
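/*
 * Build the security descriptor create context sent with the create request:
 * a self-relative descriptor with optional S-1-5-88 owner/group SIDs and a
 * DACL holding an ACE that embeds the POSIX mode, an optional owner ACE,
 * and an ACE granting access to authenticated users.  Offsets are relative
 * to the start of the descriptor and the final length is rounded to 8 bytes.
 */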
Steve Frenchfdef6652019-12-06 02:02:38 -06002340/* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
2341static struct crt_sd_ctxt *
Steve French975221e2020-06-12 09:25:21 -05002342create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
Steve Frenchfdef6652019-12-06 02:02:38 -06002343{
2344 struct crt_sd_ctxt *buf;
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002345 __u8 *ptr, *aclptr;
2346 unsigned int acelen, acl_size, ace_count;
Steve French975221e2020-06-12 09:25:21 -05002347 unsigned int owner_offset = 0;
2348 unsigned int group_offset = 0;
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002349 struct smb3_acl acl;
Steve Frenchfdef6652019-12-06 02:02:38 -06002350
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002351 *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
Steve French975221e2020-06-12 09:25:21 -05002352
2353 if (set_owner) {
Steve French975221e2020-06-12 09:25:21 -05002354 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
2355 *len += sizeof(struct owner_group_sids);
2356 }
2357
Steve Frenchfdef6652019-12-06 02:02:38 -06002358 buf = kzalloc(*len, GFP_KERNEL);
2359 if (buf == NULL)
2360 return buf;
2361
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002362 ptr = (__u8 *)&buf[1];
Steve French975221e2020-06-12 09:25:21 -05002363 if (set_owner) {
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002364 /* offset fields are from beginning of security descriptor not of create context */
2365 owner_offset = ptr - (__u8 *)&buf->sd;
Steve French975221e2020-06-12 09:25:21 -05002366 buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002367 group_offset = owner_offset + offsetof(struct owner_group_sids, group);
Steve French975221e2020-06-12 09:25:21 -05002368 buf->sd.OffsetGroup = cpu_to_le32(group_offset);
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002369
2370 setup_owner_group_sids(ptr);
2371 ptr += sizeof(struct owner_group_sids);
Steve French975221e2020-06-12 09:25:21 -05002372 } else {
2373 buf->sd.OffsetOwner = 0;
2374 buf->sd.OffsetGroup = 0;
2375 }
2376
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002377 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
Steve French975221e2020-06-12 09:25:21 -05002378 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
Steve Frenchfdef6652019-12-06 02:02:38 -06002379 buf->ccontext.NameLength = cpu_to_le16(4);
2380 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2381 buf->Name[0] = 'S';
2382 buf->Name[1] = 'e';
2383 buf->Name[2] = 'c';
2384 buf->Name[3] = 'D';
2385	buf->sd.Revision = 1; /* Must be one, see MS-DTYP 2.4.6 */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002386
Steve Frenchfdef6652019-12-06 02:02:38 -06002387 /*
2388	 * ACL is "self relative", i.e. the ACL is stored in a contiguous block of memory,
2389	 * and "DP", i.e. the DACL is present
2390 */
2391 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2392
2393	/* Sbz1 and the SACL offset are zero; owner/group offsets were set (or zeroed) above */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002394 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2395	/* Skip over the ACL for now; we will copy it into buf later */
2396 aclptr = ptr;
Steve Frenchb06d8932021-09-23 16:00:31 -05002397 ptr += sizeof(struct smb3_acl);
Steve Frenchfdef6652019-12-06 02:02:38 -06002398
2399 /* create one ACE to hold the mode embedded in reserved special SID */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002400 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
2401 ptr += acelen;
2402 acl_size = acelen + sizeof(struct smb3_acl);
2403 ace_count = 1;
Steve French975221e2020-06-12 09:25:21 -05002404
2405 if (set_owner) {
2406	/* no need to reallocate the buffer to add the two extra ACEs; plenty of space */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002407 acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
2408 ptr += acelen;
2409 acl_size += acelen;
2410 ace_count += 1;
2411 }
Steve French975221e2020-06-12 09:25:21 -05002412
Steve French643fbce2020-01-16 19:55:33 -06002413 /* and one more ACE to allow access for authenticated users */
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002414 acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
2415 ptr += acelen;
2416 acl_size += acelen;
2417 ace_count += 1;
Steve French975221e2020-06-12 09:25:21 -05002418
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002419 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2420 acl.AclSize = cpu_to_le16(acl_size);
2421 acl.AceCount = cpu_to_le16(ace_count);
Steve Frenchb06d8932021-09-23 16:00:31 -05002422 memcpy(aclptr, &acl, sizeof(struct smb3_acl));
Ronnie Sahlbergea643702020-11-30 11:29:20 +10002423
2424 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
Shyam Prasad N7d3fc012021-08-04 18:37:22 +00002425 *len = roundup(ptr - (__u8 *)buf, 8);
Steve French975221e2020-06-12 09:25:21 -05002426
Steve Frenchfdef6652019-12-06 02:02:38 -06002427 return buf;
2428}
2429
2430static int
Steve French975221e2020-06-12 09:25:21 -05002431add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
Steve Frenchfdef6652019-12-06 02:02:38 -06002432{
2433 struct smb2_create_req *req = iov[0].iov_base;
2434 unsigned int num = *num_iovec;
2435 unsigned int len = 0;
2436
Steve French975221e2020-06-12 09:25:21 -05002437 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
Steve Frenchfdef6652019-12-06 02:02:38 -06002438 if (iov[num].iov_base == NULL)
2439 return -ENOMEM;
2440 iov[num].iov_len = len;
2441 if (!req->CreateContextsOffset)
2442 req->CreateContextsOffset = cpu_to_le32(
2443 sizeof(struct smb2_create_req) +
2444 iov[num - 1].iov_len);
2445 le32_add_cpu(&req->CreateContextsLength, len);
2446 *num_iovec = num + 1;
2447 return 0;
2448}
2449
Steve Frenchff2a09e2019-07-06 14:41:38 -05002450static struct crt_query_id_ctxt *
2451create_query_id_buf(void)
2452{
2453 struct crt_query_id_ctxt *buf;
2454
2455 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2456 if (!buf)
2457 return NULL;
2458
2459 buf->ccontext.DataOffset = cpu_to_le16(0);
2460 buf->ccontext.DataLength = cpu_to_le32(0);
2461 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2462 (struct crt_query_id_ctxt, Name));
2463 buf->ccontext.NameLength = cpu_to_le16(4);
2464 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2465 buf->Name[0] = 'Q';
2466 buf->Name[1] = 'F';
2467 buf->Name[2] = 'i';
2468 buf->Name[3] = 'd';
2469 return buf;
2470}
2471
2472/* See MS-SMB2 2.2.13.2.9 */
2473static int
2474add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2475{
2476 struct smb2_create_req *req = iov[0].iov_base;
2477 unsigned int num = *num_iovec;
2478
2479 iov[num].iov_base = create_query_id_buf();
2480 if (iov[num].iov_base == NULL)
2481 return -ENOMEM;
2482 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2483 if (!req->CreateContextsOffset)
2484 req->CreateContextsOffset = cpu_to_le32(
2485 sizeof(struct smb2_create_req) +
2486 iov[num - 1].iov_len);
2487 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_query_id_ctxt));
2488 *num_iovec = num + 1;
2489 return 0;
2490}
2491
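/*
 * Build a single UTF-16 path "<server>\<share>\<path>" from the tree name
 * and the already-converted UTF-16 path, e.g. a treename of "\\srv\docs"
 * and a path of "dir\file" become "srv\docs\dir\file".  The result is null
 * terminated and its allocation is rounded up to a multiple of 8 bytes as
 * the CREATE request expects.
 */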
Aurelien Aptelf0712922017-02-22 14:47:17 +01002492static int
2493alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2494 const char *treename, const __le16 *path)
2495{
2496 int treename_len, path_len;
2497 struct nls_table *cp;
2498 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2499
2500 /*
2501 * skip leading "\\"
2502 */
2503 treename_len = strlen(treename);
2504 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2505 return -EINVAL;
2506
2507 treename += 2;
2508 treename_len -= 2;
2509
2510 path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2511
2512 /*
2513 * make room for one path separator between the treename and
2514 * path
2515 */
2516 *out_len = treename_len + 1 + path_len;
2517
2518 /*
2519 * final path needs to be null-terminated UTF16 with a
2520 * size aligned to 8
2521 */
2522
2523 *out_size = roundup((*out_len+1)*2, 8);
2524 *out_path = kzalloc(*out_size, GFP_KERNEL);
2525 if (!*out_path)
2526 return -ENOMEM;
2527
2528 cp = load_nls_default();
2529 cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2530 UniStrcat(*out_path, sep);
2531 UniStrcat(*out_path, path);
2532 unload_nls(cp);
2533
2534 return 0;
2535}
2536
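/*
 * SMB3.1.1 POSIX mkdir: send an SMB2 CREATE with CREATE_NOT_FILE plus a
 * POSIX create context carrying the requested mode, then immediately close
 * the returned handle since mkdir has no further use for it.  The error
 * labels below release resources #1-#4 in reverse order of allocation.
 */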
Steve Frenchbea851b2018-06-14 21:56:32 -05002537int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2538 umode_t mode, struct cifs_tcon *tcon,
2539 const char *full_path,
2540 struct cifs_sb_info *cifs_sb)
2541{
2542 struct smb_rqst rqst;
2543 struct smb2_create_req *req;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002544 struct smb2_create_rsp *rsp = NULL;
Steve Frenchbea851b2018-06-14 21:56:32 -05002545 struct cifs_ses *ses = tcon->ses;
2546 struct kvec iov[3]; /* make sure at least one for each open context */
2547 struct kvec rsp_iov = {NULL, 0};
2548 int resp_buftype;
2549 int uni_path_len;
2550 __le16 *copy_path = NULL;
2551 int copy_size;
2552 int rc = 0;
2553 unsigned int n_iov = 2;
2554 __u32 file_attributes = 0;
2555 char *pc_buf = NULL;
2556 int flags = 0;
2557 unsigned int total_len;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002558 __le16 *utf16_path = NULL;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002559 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve Frenchbea851b2018-06-14 21:56:32 -05002560
2561 cifs_dbg(FYI, "mkdir\n");
2562
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002563 /* resource #1: path allocation */
2564 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2565 if (!utf16_path)
2566 return -ENOMEM;
2567
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002568 if (!ses || !server) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002569 rc = -EIO;
2570 goto err_free_path;
2571 }
Steve Frenchbea851b2018-06-14 21:56:32 -05002572
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002573 /* resource #2: request */
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002574 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2575 (void **) &req, &total_len);
Steve Frenchbea851b2018-06-14 21:56:32 -05002576 if (rc)
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002577 goto err_free_path;
2578
Steve Frenchbea851b2018-06-14 21:56:32 -05002579
2580 if (smb3_encryption_required(tcon))
2581 flags |= CIFS_TRANSFORM_REQ;
2582
Steve Frenchbea851b2018-06-14 21:56:32 -05002583 req->ImpersonationLevel = IL_IMPERSONATION;
2584 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2585 /* File attributes ignored on open (used in create though) */
2586 req->FileAttributes = cpu_to_le32(file_attributes);
2587 req->ShareAccess = FILE_SHARE_ALL_LE;
2588 req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2589 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2590
2591 iov[0].iov_base = (char *)req;
2592 /* -1 since last byte is buf[0] which is sent below (path) */
2593 iov[0].iov_len = total_len - 1;
2594
2595 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2596
2597 /* [MS-SMB2] 2.2.13 NameOffset:
2598 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2599 * the SMB2 header, the file name includes a prefix that will
2600 * be processed during DFS name normalization as specified in
2601 * section 3.3.5.9. Otherwise, the file name is relative to
2602 * the share that is identified by the TreeId in the SMB2
2603 * header.
2604 */
2605 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2606 int name_len;
2607
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002608 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
Steve Frenchbea851b2018-06-14 21:56:32 -05002609 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2610 &name_len,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002611 tcon->treeName, utf16_path);
2612 if (rc)
2613 goto err_free_req;
2614
Steve Frenchbea851b2018-06-14 21:56:32 -05002615 req->NameLength = cpu_to_le16(name_len * 2);
2616 uni_path_len = copy_size;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002617 /* free before overwriting resource */
2618 kfree(utf16_path);
2619 utf16_path = copy_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002620 } else {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002621 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
Steve Frenchbea851b2018-06-14 21:56:32 -05002622	/* MUST set path len (NameLength) to 0 when opening the root of the share */
2623 req->NameLength = cpu_to_le16(uni_path_len - 2);
2624 if (uni_path_len % 8 != 0) {
2625 copy_size = roundup(uni_path_len, 8);
2626 copy_path = kzalloc(copy_size, GFP_KERNEL);
2627 if (!copy_path) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002628 rc = -ENOMEM;
2629 goto err_free_req;
Steve Frenchbea851b2018-06-14 21:56:32 -05002630 }
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002631 memcpy((char *)copy_path, (const char *)utf16_path,
Steve Frenchbea851b2018-06-14 21:56:32 -05002632 uni_path_len);
2633 uni_path_len = copy_size;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002634 /* free before overwriting resource */
2635 kfree(utf16_path);
2636 utf16_path = copy_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002637 }
2638 }
2639
2640 iov[1].iov_len = uni_path_len;
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002641 iov[1].iov_base = utf16_path;
Steve Frenchbea851b2018-06-14 21:56:32 -05002642 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2643
2644 if (tcon->posix_extensions) {
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002645 /* resource #3: posix buf */
Steve Frenchbea851b2018-06-14 21:56:32 -05002646 rc = add_posix_context(iov, &n_iov, mode);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002647 if (rc)
2648 goto err_free_req;
Steve Frenchbea851b2018-06-14 21:56:32 -05002649 pc_buf = iov[n_iov-1].iov_base;
2650 }
2651
2652
2653 memset(&rqst, 0, sizeof(struct smb_rqst));
2654 rqst.rq_iov = iov;
2655 rqst.rq_nvec = n_iov;
2656
Steve Frenchd2f15422019-09-22 00:55:46 -05002657 /* no need to inc num_remote_opens because we close it just below */
Steve Frenchefe2e9f2019-02-26 19:08:12 -06002658 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
2659 FILE_WRITE_ATTRIBUTES);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002660 /* resource #4: response buffer */
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002661 rc = cifs_send_recv(xid, ses, server,
2662 &rqst, &resp_buftype, flags, &rsp_iov);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002663 if (rc) {
Steve Frenchbea851b2018-06-14 21:56:32 -05002664 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2665 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002666 CREATE_NOT_FILE,
2667 FILE_WRITE_ATTRIBUTES, rc);
2668 goto err_free_rsp_buf;
2669 }
2670
2671 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002672 trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
2673 tcon->tid,
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002674 ses->Suid, CREATE_NOT_FILE,
2675 FILE_WRITE_ATTRIBUTES);
Steve Frenchbea851b2018-06-14 21:56:32 -05002676
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002677 SMB2_close(xid, tcon, le64_to_cpu(rsp->PersistentFileId),
2678 le64_to_cpu(rsp->VolatileFileId));
Steve Frenchbea851b2018-06-14 21:56:32 -05002679
2680	/* Eventually save off posix specific response info and timestamps */
2681
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002682err_free_rsp_buf:
Steve Frenchbea851b2018-06-14 21:56:32 -05002683 free_rsp_buf(resp_buftype, rsp);
Aurelien Aptel256b4c32018-06-19 15:18:48 -07002684 kfree(pc_buf);
2685err_free_req:
2686 cifs_small_buf_release(req);
2687err_free_path:
2688 kfree(utf16_path);
Steve Frenchbea851b2018-06-14 21:56:32 -05002689 return rc;
Steve Frenchbea851b2018-06-14 21:56:32 -05002690}
Steve Frenchbea851b2018-06-14 21:56:32 -05002691
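/*
 * SMB2_open_init() lays out rqst->rq_iov for an SMB2 CREATE as follows:
 *	iov[0]  - the fixed-size create request (minus the trailing pad byte)
 *	iov[1]  - the UTF-16 path, padded to a multiple of 8 bytes
 *	iov[2+] - optional create contexts (lease, durable handle, POSIX,
 *		  timewarp/snapshot, security descriptor, query-on-disk-id),
 *		  chained together via each context's Next field
 */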
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002692int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002693SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2694 struct smb_rqst *rqst, __u8 *oplock,
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002695 struct cifs_open_parms *oparms, __le16 *path)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002696{
2697 struct smb2_create_req *req;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002698 unsigned int n_iov = 2;
Pavel Shilovskyca819832013-07-05 12:21:26 +04002699 __u32 file_attributes = 0;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002700 int copy_size;
2701 int uni_path_len;
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002702 unsigned int total_len;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002703 struct kvec *iov = rqst->rq_iov;
2704 __le16 *copy_path;
2705 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002706
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002707 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2708 (void **) &req, &total_len);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002709 if (rc)
2710 return rc;
2711
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002712 iov[0].iov_base = (char *)req;
2713 /* -1 since last byte is buf[0] which is sent below (path) */
2714 iov[0].iov_len = total_len - 1;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07002715
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002716 if (oparms->create_options & CREATE_OPTION_READONLY)
Pavel Shilovskyca819832013-07-05 12:21:26 +04002717 file_attributes |= ATTR_READONLY;
Steve Frenchdb8b6312014-09-22 05:13:55 -05002718 if (oparms->create_options & CREATE_OPTION_SPECIAL)
2719 file_attributes |= ATTR_SYSTEM;
Pavel Shilovskyca819832013-07-05 12:21:26 +04002720
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002721 req->ImpersonationLevel = IL_IMPERSONATION;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002722 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002723 /* File attributes ignored on open (used in create though) */
2724 req->FileAttributes = cpu_to_le32(file_attributes);
2725 req->ShareAccess = FILE_SHARE_ALL_LE;
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002726
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002727 req->CreateDisposition = cpu_to_le32(oparms->disposition);
2728 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002729 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
Aurelien Aptelf0712922017-02-22 14:47:17 +01002730
2731 /* [MS-SMB2] 2.2.13 NameOffset:
2732 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2733 * the SMB2 header, the file name includes a prefix that will
2734 * be processed during DFS name normalization as specified in
2735 * section 3.3.5.9. Otherwise, the file name is relative to
2736 * the share that is identified by the TreeId in the SMB2
2737 * header.
2738 */
2739 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2740 int name_len;
2741
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002742 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
Aurelien Aptelf0712922017-02-22 14:47:17 +01002743 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2744 &name_len,
2745 tcon->treeName, path);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002746 if (rc)
Aurelien Aptelf0712922017-02-22 14:47:17 +01002747 return rc;
2748 req->NameLength = cpu_to_le16(name_len * 2);
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002749 uni_path_len = copy_size;
2750 path = copy_path;
Aurelien Aptelf0712922017-02-22 14:47:17 +01002751 } else {
2752 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
2753	/* MUST set path len (NameLength) to 0 when opening the root of the share */
2754 req->NameLength = cpu_to_le16(uni_path_len - 2);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002755 copy_size = uni_path_len;
2756 if (copy_size % 8 != 0)
2757 copy_size = roundup(copy_size, 8);
2758 copy_path = kzalloc(copy_size, GFP_KERNEL);
2759 if (!copy_path)
2760 return -ENOMEM;
2761 memcpy((char *)copy_path, (const char *)path,
2762 uni_path_len);
2763 uni_path_len = copy_size;
2764 path = copy_path;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002765 }
2766
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002767 iov[1].iov_len = uni_path_len;
2768 iov[1].iov_base = path;
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04002769
Steve French3e7a02d2019-09-11 21:46:20 -05002770 if ((!server->oplocks) || (tcon->no_lease))
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002771 *oplock = SMB2_OPLOCK_LEVEL_NONE;
2772
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002773 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002774 *oplock == SMB2_OPLOCK_LEVEL_NONE)
2775 req->RequestedOplockLevel = *oplock;
Steve Frenchf8015682018-08-31 15:12:10 -05002776 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2777 (oparms->create_options & CREATE_NOT_FILE))
2778 req->RequestedOplockLevel = *oplock; /* no srv lease support */
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002779 else {
Stefano Brivio729c0c92018-07-05 15:10:02 +02002780 rc = add_lease_context(server, iov, &n_iov,
2781 oparms->fid->lease_key, oplock);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002782 if (rc)
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04002783 return rc;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002784 }
2785
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002786 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
2787 /* need to set Next field of lease context if we request it */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002788 if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002789 struct create_context *ccontext =
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002790 (struct create_context *)iov[n_iov-1].iov_base;
Steve French1c469432013-07-10 12:50:57 -05002791 ccontext->Next =
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002792 cpu_to_le32(server->vals->create_lease_size);
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002793 }
Steve Frenchb56eae42015-11-03 09:26:27 -06002794
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002795 rc = add_durable_context(iov, &n_iov, oparms,
Steve Frenchb56eae42015-11-03 09:26:27 -06002796 tcon->use_persistent);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002797 if (rc)
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002798 return rc;
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04002799 }
2800
Steve Frenchce558b02018-05-31 19:16:54 -05002801 if (tcon->posix_extensions) {
2802 if (n_iov > 2) {
2803 struct create_context *ccontext =
2804 (struct create_context *)iov[n_iov-1].iov_base;
2805 ccontext->Next =
2806 cpu_to_le32(iov[n_iov-1].iov_len);
2807 }
2808
2809 rc = add_posix_context(iov, &n_iov, oparms->mode);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002810 if (rc)
Steve Frenchce558b02018-05-31 19:16:54 -05002811 return rc;
Steve Frenchce558b02018-05-31 19:16:54 -05002812 }
Steve Frenchce558b02018-05-31 19:16:54 -05002813
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002814 if (tcon->snapshot_time) {
2815 cifs_dbg(FYI, "adding snapshot context\n");
2816 if (n_iov > 2) {
2817 struct create_context *ccontext =
2818 (struct create_context *)iov[n_iov-1].iov_base;
2819 ccontext->Next =
2820 cpu_to_le32(iov[n_iov-1].iov_len);
2821 }
2822
2823 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
2824 if (rc)
2825 return rc;
2826 }
2827
Steve French975221e2020-06-12 09:25:21 -05002828 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
2829 bool set_mode;
2830 bool set_owner;
2831
2832 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
2833 (oparms->mode != ACL_NO_MODE))
2834 set_mode = true;
2835 else {
2836 set_mode = false;
2837 oparms->mode = ACL_NO_MODE;
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002838 }
2839
Steve French975221e2020-06-12 09:25:21 -05002840 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
2841 set_owner = true;
2842 else
2843 set_owner = false;
2844
2845 if (set_owner | set_mode) {
2846 if (n_iov > 2) {
2847 struct create_context *ccontext =
2848 (struct create_context *)iov[n_iov-1].iov_base;
2849 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2850 }
2851
2852 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
2853 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
2854 if (rc)
2855 return rc;
2856 }
Steve Frenchc3ca78e2019-09-25 00:32:13 -05002857 }
2858
Steve Frenchff2a09e2019-07-06 14:41:38 -05002859 if (n_iov > 2) {
2860 struct create_context *ccontext =
2861 (struct create_context *)iov[n_iov-1].iov_base;
2862 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2863 }
2864 add_query_id_context(iov, &n_iov);
Steve Frenchcdeaf9d2018-08-10 02:25:06 -05002865
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002866 rqst->rq_nvec = n_iov;
2867 return 0;
2868}
2869
2870/* rq_iov[0] is the request and is released by cifs_small_buf_release().
2871 * All other vectors are freed by kfree().
2872 */
2873void
2874SMB2_open_free(struct smb_rqst *rqst)
2875{
2876 int i;
2877
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10002878 if (rqst && rqst->rq_iov) {
2879 cifs_small_buf_release(rqst->rq_iov[0].iov_base);
2880 for (i = 1; i < rqst->rq_nvec; i++)
2881 if (rqst->rq_iov[i].iov_base != smb2_padding)
2882 kfree(rqst->rq_iov[i].iov_base);
2883 }
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002884}
2885
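/*
 * SMB2_open() is the non-compounded wrapper: it builds the request with
 * SMB2_open_init(), sends it, fills in oparms->fid from the response and
 * parses any returned create contexts.  An illustrative (untested) sketch
 * of a caller, assuming utf16_path and fid have already been set up:
 *
 *	struct cifs_open_parms oparms = {
 *		.tcon = tcon,
 *		.desired_access = FILE_READ_ATTRIBUTES,
 *		.disposition = FILE_OPEN,
 *		.fid = &fid,
 *	};
 *	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
 *
 *	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
 *		       NULL, NULL);
 */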
2886int
2887SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2888 __u8 *oplock, struct smb2_file_all_info *buf,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002889 struct create_posix_rsp *posix,
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002890 struct kvec *err_iov, int *buftype)
2891{
2892 struct smb_rqst rqst;
2893 struct smb2_create_rsp *rsp = NULL;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002894 struct cifs_tcon *tcon = oparms->tcon;
2895 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002896 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002897 struct kvec iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002898 struct kvec rsp_iov = {NULL, 0};
Garry McNultyef2298a2018-10-03 20:51:21 +01002899 int resp_buftype = CIFS_NO_BUFFER;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002900 int rc = 0;
2901 int flags = 0;
2902
2903 cifs_dbg(FYI, "create/open\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002904 if (!ses || !server)
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002905 return -EIO;
2906
2907 if (smb3_encryption_required(tcon))
2908 flags |= CIFS_TRANSFORM_REQ;
2909
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002910 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002911 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002912 rqst.rq_iov = iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002913 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002914
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002915 rc = SMB2_open_init(tcon, server,
2916 &rqst, oplock, oparms, path);
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002917 if (rc)
2918 goto creat_exit;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10002919
Steve Frenchefe2e9f2019-02-26 19:08:12 -06002920 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
2921 oparms->create_options, oparms->desired_access);
2922
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002923 rc = cifs_send_recv(xid, ses, server,
2924 &rqst, &resp_buftype, flags,
Ronnie Sahlberg4f33bc32017-11-20 11:24:38 +11002925 &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07002926 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002927
2928 if (rc != 0) {
2929 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002930 if (err_iov && rsp) {
2931 *err_iov = rsp_iov;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002932 *buftype = resp_buftype;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002933 resp_buftype = CIFS_NO_BUFFER;
2934 rsp = NULL;
2935 }
Steve French28d59362018-05-30 21:42:34 -05002936 trace_smb3_open_err(xid, tcon->tid, ses->Suid,
2937 oparms->create_options, oparms->desired_access, rc);
Steve French7dcc82c2019-09-11 00:07:36 -05002938 if (rc == -EREMCHG) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002939 pr_warn_once("server share %s deleted\n",
2940 tcon->treeName);
Steve French7dcc82c2019-09-11 00:07:36 -05002941 tcon->need_reconnect = true;
2942 }
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002943 goto creat_exit;
Steve French6b789512021-11-11 16:10:00 -06002944 } else if (rsp == NULL) /* unlikely to happen, but safer to check */
2945 goto creat_exit;
2946 else
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002947 trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
2948 tcon->tid,
Steve French28d59362018-05-30 21:42:34 -05002949 ses->Suid, oparms->create_options,
2950 oparms->desired_access);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002951
Steve Frenchfae80442018-10-19 17:14:32 -05002952 atomic_inc(&tcon->num_remote_opens);
Ronnie Sahlbergc4628702021-09-08 12:10:15 +10002953 oparms->fid->persistent_fid = le64_to_cpu(rsp->PersistentFileId);
2954 oparms->fid->volatile_fid = le64_to_cpu(rsp->VolatileFileId);
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002955 oparms->fid->access = oparms->desired_access;
Steve Frenchdfe33f92018-10-30 19:50:31 -05002956#ifdef CONFIG_CIFS_DEBUG2
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002957 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
Steve Frenchdfe33f92018-10-30 19:50:31 -05002958#endif /* CIFS_DEBUG2 */
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07002959
2960 if (buf) {
Kees Cookfbcff332021-06-21 14:44:46 -07002961 buf->CreationTime = rsp->CreationTime;
2962 buf->LastAccessTime = rsp->LastAccessTime;
2963 buf->LastWriteTime = rsp->LastWriteTime;
2964 buf->ChangeTime = rsp->ChangeTime;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07002965 buf->AllocationSize = rsp->AllocationSize;
2966 buf->EndOfFile = rsp->EndofFile;
2967 buf->Attributes = rsp->FileAttributes;
2968 buf->NumberOfLinks = cpu_to_le32(1);
2969 buf->DeletePending = 0;
2970 }
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002971
Steve French89a5bfa2019-07-18 17:22:18 -05002972
2973 smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
Aurelien Aptel69dda302020-03-02 17:53:22 +01002974 oparms->fid->lease_key, oplock, buf, posix);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002975creat_exit:
Ronnie Sahlberg1eb9fb52018-08-08 15:07:46 +10002976 SMB2_open_free(&rqst);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04002977 free_rsp_buf(resp_buftype, rsp);
2978 return rc;
2979}
2980
Steve French4a72daf2013-06-25 00:20:49 -05002981int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002982SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2983 struct smb_rqst *rqst,
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002984 u64 persistent_fid, u64 volatile_fid, u32 opcode,
Steve French153322f2019-03-28 22:32:49 -05002985 bool is_fsctl, char *in_data, u32 indatalen,
2986 __u32 max_response_size)
Steve French4a72daf2013-06-25 00:20:49 -05002987{
2988 struct smb2_ioctl_req *req;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002989 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlberg97754682017-11-09 12:14:20 +11002990 unsigned int total_len;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10002991 int rc;
Long Li2c87d6a2019-05-15 14:09:05 -07002992 char *in_data_buf;
Steve French4a72daf2013-06-25 00:20:49 -05002993
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002994 rc = smb2_ioctl_req_init(opcode, tcon, server,
2995 (void **) &req, &total_len);
Steve French4a72daf2013-06-25 00:20:49 -05002996 if (rc)
2997 return rc;
2998
Long Li2c87d6a2019-05-15 14:09:05 -07002999 if (indatalen) {
3000 /*
3001 * indatalen is usually small at a couple of bytes max, so
3002 * just allocate through generic pool
3003 */
YueHaibingd81f0972019-06-01 03:31:10 +00003004 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
Long Li2c87d6a2019-05-15 14:09:05 -07003005 if (!in_data_buf) {
3006 cifs_small_buf_release(req);
3007 return -ENOMEM;
3008 }
Long Li2c87d6a2019-05-15 14:09:05 -07003009 }
3010
Steve French4a72daf2013-06-25 00:20:49 -05003011 req->CtlCode = cpu_to_le32(opcode);
3012 req->PersistentFileId = persistent_fid;
3013 req->VolatileFileId = volatile_fid;
3014
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003015 iov[0].iov_base = (char *)req;
3016 /*
3017 * If no input data, the size of ioctl struct in
3018 * protocol spec still includes a 1 byte data buffer,
3019	 * but if input data is passed to the ioctl, we do not
3020 * want to double count this, so we do not send
3021 * the dummy one byte of data in iovec[0] if sending
3022 * input data (in iovec[1]).
3023 */
Steve French4a72daf2013-06-25 00:20:49 -05003024 if (indatalen) {
3025 req->InputCount = cpu_to_le32(indatalen);
3026 /* do not set InputOffset if no input data */
3027 req->InputOffset =
Ronnie Sahlberg97754682017-11-09 12:14:20 +11003028 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003029 rqst->rq_nvec = 2;
3030 iov[0].iov_len = total_len - 1;
Long Li2c87d6a2019-05-15 14:09:05 -07003031 iov[1].iov_base = in_data_buf;
Steve French4a72daf2013-06-25 00:20:49 -05003032 iov[1].iov_len = indatalen;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003033 } else {
3034 rqst->rq_nvec = 1;
3035 iov[0].iov_len = total_len;
3036 }
Steve French4a72daf2013-06-25 00:20:49 -05003037
3038 req->OutputOffset = 0;
3039 req->OutputCount = 0; /* MBZ */
3040
3041 /*
Steve French153322f2019-03-28 22:32:49 -05003042 * In most cases max_response_size is set to 16K (CIFSMaxBufSize)
3043	 * We could increase the default MaxOutputResponse, but that could require
3044	 * more credits. Windows typically sets this smaller, but for some
Steve French4a72daf2013-06-25 00:20:49 -05003045	 * ioctls it may be useful to allow the server to send more. No point
3046	 * limiting what the server can send as long as it fits in one credit.
Steve French153322f2019-03-28 22:32:49 -05003047	 * We cannot handle more than CIFS_MAX_BUF_SIZE yet but may want
3048	 * to increase this limit in the future.
3049	 * Note that for snapshot queries servers like Azure expect that
3050	 * the first query be minimal size (and just used to get the number/size
3051	 * of previous versions), so the response size must be specified as EXACTLY
3052	 * sizeof(struct snapshot_array), which is 16 when rounded up to a multiple
3053 * of eight bytes. Currently that is the only case where we set max
3054 * response size smaller.
Steve French4a72daf2013-06-25 00:20:49 -05003055 */
Steve French153322f2019-03-28 22:32:49 -05003056 req->MaxOutputResponse = cpu_to_le32(max_response_size);
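	/*
	 * Charge enough credits to cover the larger of the input and output
	 * buffers: one credit per SMB2_MAX_BUFFER_SIZE (64KB) chunk, rounded
	 * up, as computed below.
	 */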
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003057 req->hdr.CreditCharge =
Namjae Jeonebf57442020-06-11 11:21:19 +09003058 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
3059 SMB2_MAX_BUFFER_SIZE));
Steve French4a72daf2013-06-25 00:20:49 -05003060 if (is_fsctl)
3061 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
3062 else
3063 req->Flags = 0;
3064
Steve French4587eee2017-10-25 15:58:31 -05003065 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
3066 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003067 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Steve French4a72daf2013-06-25 00:20:49 -05003068
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003069 return 0;
3070}
3071
3072void
3073SMB2_ioctl_free(struct smb_rqst *rqst)
3074{
Murphy Zhou6457c202019-05-23 12:12:43 +08003075 int i;
Long Li2c87d6a2019-05-15 14:09:05 -07003076 if (rqst && rqst->rq_iov) {
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003077 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Murphy Zhou6457c202019-05-23 12:12:43 +08003078 for (i = 1; i < rqst->rq_nvec; i++)
3079 if (rqst->rq_iov[i].iov_base != smb2_padding)
3080 kfree(rqst->rq_iov[i].iov_base);
Long Li2c87d6a2019-05-15 14:09:05 -07003081 }
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003082}
3083
Steve French153322f2019-03-28 22:32:49 -05003084
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003085/*
3086 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
3087 */
3088int
3089SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3090 u64 volatile_fid, u32 opcode, bool is_fsctl,
Steve French153322f2019-03-28 22:32:49 -05003091 char *in_data, u32 indatalen, u32 max_out_data_len,
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003092 char **out_data, u32 *plen /* returned data len */)
3093{
3094 struct smb_rqst rqst;
3095 struct smb2_ioctl_rsp *rsp = NULL;
3096 struct cifs_ses *ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003097 struct TCP_Server_Info *server;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003098 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003099 struct kvec rsp_iov = {NULL, 0};
3100 int resp_buftype = CIFS_NO_BUFFER;
3101 int rc = 0;
3102 int flags = 0;
3103
3104 cifs_dbg(FYI, "SMB2 IOCTL\n");
3105
3106 if (out_data != NULL)
3107 *out_data = NULL;
3108
3109 /* zero out returned data len, in case of error */
3110 if (plen)
3111 *plen = 0;
3112
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003113 if (!tcon)
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003114 return -EIO;
3115
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003116 ses = tcon->ses;
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003117 if (!ses)
3118 return -EIO;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003119
3120 server = cifs_pick_channel(ses);
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003121 if (!server)
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003122 return -EIO;
3123
3124 if (smb3_encryption_required(tcon))
3125 flags |= CIFS_TRANSFORM_REQ;
3126
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003127 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003128 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003129 rqst.rq_iov = iov;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003130 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003131
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003132 rc = SMB2_ioctl_init(tcon, server,
3133 &rqst, persistent_fid, volatile_fid, opcode,
Steve French153322f2019-03-28 22:32:49 -05003134 is_fsctl, in_data, indatalen, max_out_data_len);
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003135 if (rc)
3136 goto ioctl_exit;
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003137
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003138 rc = cifs_send_recv(xid, ses, server,
3139 &rqst, &resp_buftype, flags,
Ronnie Sahlberg97754682017-11-09 12:14:20 +11003140 &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003141 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
Steve French4a72daf2013-06-25 00:20:49 -05003142
Steve Frencheccb4422018-05-17 21:16:55 -05003143 if (rc != 0)
3144 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3145 ses->Suid, 0, opcode, rc);
3146
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003147 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
Steve French8e353102015-03-26 19:47:02 -05003148 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
Steve French4a72daf2013-06-25 00:20:49 -05003149 goto ioctl_exit;
Steve French9bf0c9c2013-11-16 18:05:28 -06003150 } else if (rc == -EINVAL) {
3151 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3152 (opcode != FSCTL_SRV_COPYCHUNK)) {
Steve French8e353102015-03-26 19:47:02 -05003153 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
Steve French9bf0c9c2013-11-16 18:05:28 -06003154 goto ioctl_exit;
3155 }
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003156 } else if (rc == -E2BIG) {
3157 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3158 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3159 goto ioctl_exit;
3160 }
Steve French4a72daf2013-06-25 00:20:49 -05003161 }
3162
3163 /* check if caller wants to look at return data or just return rc */
3164 if ((plen == NULL) || (out_data == NULL))
3165 goto ioctl_exit;
3166
Steve French4d9beec2021-11-11 14:39:23 -06003167 /*
3168 * Although unlikely to be possible for rsp to be null and rc not set,
3169 * adding check below is slightly safer long term (and quiets Coverity
3170 * warning)
3171 */
3172 if (rsp == NULL) {
3173 rc = -EIO;
3174 goto ioctl_exit;
3175 }
3176
Steve French4a72daf2013-06-25 00:20:49 -05003177 *plen = le32_to_cpu(rsp->OutputCount);
3178
3179 /* We check for obvious errors in the output buffer length and offset */
3180 if (*plen == 0)
3181 goto ioctl_exit; /* server returned no data */
Dan Carpenter2d204ee2018-09-10 14:12:07 +03003182 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003183 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
Steve French4a72daf2013-06-25 00:20:49 -05003184 *plen = 0;
3185 rc = -EIO;
3186 goto ioctl_exit;
3187 }
3188
Dan Carpenter2d204ee2018-09-10 14:12:07 +03003189 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003190 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
Steve French4a72daf2013-06-25 00:20:49 -05003191 le32_to_cpu(rsp->OutputOffset));
3192 *plen = 0;
3193 rc = -EIO;
3194 goto ioctl_exit;
3195 }
3196
YueHaibingd034fee2018-09-10 01:33:06 +00003197 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3198 *plen, GFP_KERNEL);
Steve French4a72daf2013-06-25 00:20:49 -05003199 if (*out_data == NULL) {
3200 rc = -ENOMEM;
3201 goto ioctl_exit;
3202 }
3203
Steve French4a72daf2013-06-25 00:20:49 -05003204ioctl_exit:
Ronnie Sahlbergccdc77a2019-03-13 14:37:48 +10003205 SMB2_ioctl_free(&rqst);
Steve French4a72daf2013-06-25 00:20:49 -05003206 free_rsp_buf(resp_buftype, rsp);
3207 return rc;
3208}
3209
Steve French64a5cfa2013-10-14 15:31:32 -05003210/*
3211 * Individual callers to ioctl worker function follow
3212 */
3213
3214int
3215SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3216 u64 persistent_fid, u64 volatile_fid)
3217{
3218 int rc;
Steve French64a5cfa2013-10-14 15:31:32 -05003219 struct compress_ioctl fsctl_input;
3220 char *ret_data = NULL;
3221
3222 fsctl_input.CompressionState =
Fabian Frederickbc09d142014-12-10 15:41:15 -08003223 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
Steve French64a5cfa2013-10-14 15:31:32 -05003224
3225 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3226 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
3227 (char *)&fsctl_input /* data input */,
Steve French153322f2019-03-28 22:32:49 -05003228 2 /* in data len */, CIFSMaxBufSize /* max out data */,
3229 &ret_data /* out data */, NULL);
Steve French64a5cfa2013-10-14 15:31:32 -05003230
3231 cifs_dbg(FYI, "set compression rc %d\n", rc);
Steve French64a5cfa2013-10-14 15:31:32 -05003232
3233 return rc;
3234}
3235
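/*
 * Close request helpers.  When query_attrs is true the request sets
 * SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB so the server returns the file attributes
 * and timestamps in the close response; __SMB2_close() uses that to fill in
 * the caller's smb2_file_network_open_info without issuing a separate query.
 */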
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003236int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003237SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3238 struct smb_rqst *rqst,
Steve French43f8a6a2019-12-02 21:46:54 -06003239 u64 persistent_fid, u64 volatile_fid, bool query_attrs)
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003240{
3241 struct smb2_close_req *req;
3242 struct kvec *iov = rqst->rq_iov;
3243 unsigned int total_len;
3244 int rc;
3245
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003246 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3247 (void **) &req, &total_len);
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003248 if (rc)
3249 return rc;
3250
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003251 req->PersistentFileId = cpu_to_le64(persistent_fid);
3252 req->VolatileFileId = cpu_to_le64(volatile_fid);
Steve French43f8a6a2019-12-02 21:46:54 -06003253 if (query_attrs)
3254 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3255 else
3256 req->Flags = 0;
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003257 iov[0].iov_base = (char *)req;
3258 iov[0].iov_len = total_len;
3259
3260 return 0;
3261}
3262
3263void
3264SMB2_close_free(struct smb_rqst *rqst)
3265{
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10003266 if (rqst && rqst->rq_iov)
3267 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003268}
3269
3270int
Steve French43f8a6a2019-12-02 21:46:54 -06003271__SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3272 u64 persistent_fid, u64 volatile_fid,
3273 struct smb2_file_network_open_info *pbuf)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003274{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003275 struct smb_rqst rqst;
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003276 struct smb2_close_rsp *rsp = NULL;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003277 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003278 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003279 struct kvec iov[1];
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003280 struct kvec rsp_iov;
Garry McNultyef2298a2018-10-03 20:51:21 +01003281 int resp_buftype = CIFS_NO_BUFFER;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003282 int rc = 0;
Steve French9e8fae22019-12-02 17:55:41 -06003283 int flags = 0;
Steve French43f8a6a2019-12-02 21:46:54 -06003284 bool query_attrs = false;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003285
Joe Perchesf96637b2013-05-04 22:12:25 -05003286 cifs_dbg(FYI, "Close\n");
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003287
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003288 if (!ses || !server)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003289 return -EIO;
3290
Steve French5a77e752018-05-09 17:43:08 -05003291 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07003292 flags |= CIFS_TRANSFORM_REQ;
3293
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003294 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003295 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003296 rqst.rq_iov = iov;
3297 rqst.rq_nvec = 1;
3298
Steve French43f8a6a2019-12-02 21:46:54 -06003299	/* check if we need to ask the server to return timestamps in the close response */
3300 if (pbuf)
3301 query_attrs = true;
3302
Steve Frenchf90f9792019-09-03 18:35:42 -05003303 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003304 rc = SMB2_close_init(tcon, server,
3305 &rqst, persistent_fid, volatile_fid,
Steve French43f8a6a2019-12-02 21:46:54 -06003306 query_attrs);
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003307 if (rc)
3308 goto close_exit;
3309
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003310 rc = cifs_send_recv(xid, ses, server,
3311 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003312 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003313
3314 if (rc != 0) {
Namjae Jeond4a029d2014-08-20 19:39:59 +09003315 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003316 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3317 rc);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003318 goto close_exit;
Steve French43f8a6a2019-12-02 21:46:54 -06003319 } else {
Steve Frenchf90f9792019-09-03 18:35:42 -05003320 trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3321 ses->Suid);
Steve French43f8a6a2019-12-02 21:46:54 -06003322 /*
3323	 * Note that we have to subtract 4 since struct network_open_info
3324	 * has a final 4 byte pad that the close response does not have
3325 */
3326 if (pbuf)
3327 memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
3328 }
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003329
Steve Frenchfae80442018-10-19 17:14:32 -05003330 atomic_dec(&tcon->num_remote_opens);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003331close_exit:
Ronnie Sahlberg8eb4ecf2018-08-01 09:26:16 +10003332 SMB2_close_free(&rqst);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003333 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003334
3335 /* retry close in a worker thread if this one is interrupted */
Paulo Alcantara2659d3b2021-01-13 14:16:16 -03003336 if (is_interrupt_error(rc)) {
Steve French9e8fae22019-12-02 17:55:41 -06003337 int tmp_rc;
3338
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003339 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3340 volatile_fid);
3341 if (tmp_rc)
3342 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3343 persistent_fid, tmp_rc);
3344 }
Pavel Shilovsky9150c3a2019-11-21 11:35:12 -08003345 return rc;
Ronnie Sahlberg97ca1762018-04-26 08:50:49 -06003346}
3347
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003348int
Steve French43f8a6a2019-12-02 21:46:54 -06003349SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3350 u64 persistent_fid, u64 volatile_fid)
3351{
3352 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3353}
3354
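/*
 * Sanity check a variable length field returned by the server: the region
 * [offset, offset + buffer_length) must lie entirely within the received
 * iov, buffer_length must be at least min_buf_size, and neither length may
 * exceed the RFC1001 maximum (0x7FFFFF).
 */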
3355int
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003356smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3357 struct kvec *iov, unsigned int min_buf_size)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003358{
Ronnie Sahlbergc1596ff2018-04-09 18:06:30 +10003359 unsigned int smb_len = iov->iov_len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003360 char *end_of_smb = smb_len + (char *)iov->iov_base;
3361 char *begin_of_buf = offset + (char *)iov->iov_base;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003362 char *end_of_buf = begin_of_buf + buffer_length;
3363
3364
3365 if (buffer_length < min_buf_size) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003366 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3367 buffer_length, min_buf_size);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003368 return -EINVAL;
3369 }
3370
3371 /* check if beyond RFC1001 maximum length */
3372 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003373 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3374 buffer_length, smb_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003375 return -EINVAL;
3376 }
3377
3378 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07003379 cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003380 return -EINVAL;
3381 }
3382
3383 return 0;
3384}
3385
3386/*
3387 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3388 * Caller must free buffer.
3389 */
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10003390int
3391smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3392 struct kvec *iov, unsigned int minbufsize,
3393 char *data)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003394{
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003395 char *begin_of_buf = offset + (char *)iov->iov_base;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003396 int rc;
3397
3398 if (!data)
3399 return -EINVAL;
3400
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10003401 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003402 if (rc)
3403 return rc;
3404
3405 memcpy(data, begin_of_buf, buffer_length);
3406
3407 return 0;
3408}
3409
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003410int
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003411SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3412 struct smb_rqst *rqst,
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003413 u64 persistent_fid, u64 volatile_fid,
3414 u8 info_class, u8 info_type, u32 additional_info,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003415 size_t output_len, size_t input_len, void *input)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003416{
3417 struct smb2_query_info_req *req;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003418 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11003419 unsigned int total_len;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003420 int rc;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003421
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003422 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3423 (void **) &req, &total_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003424 if (rc)
3425 return rc;
3426
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003427 req->InfoType = info_type;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003428 req->FileInfoClass = info_class;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003429 req->PersistentFileId = persistent_fid;
3430 req->VolatileFileId = volatile_fid;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003431 req->AdditionalInformation = cpu_to_le32(additional_info);
Aurelien Aptel48923d22017-10-17 14:47:17 +02003432
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003433 req->OutputBufferLength = cpu_to_le32(output_len);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003434 if (input_len) {
3435 req->InputBufferLength = cpu_to_le32(input_len);
3436	/* total_len for an smb query request is never close to the le16 max */
3437 req->InputBufferOffset = cpu_to_le16(total_len - 1);
3438 memcpy(req->Buffer, input, input_len);
3439 }
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003440
3441 iov[0].iov_base = (char *)req;
Ronnie Sahlbergb2fb7fe2017-11-20 11:24:46 +11003442 /* 1 for Buffer */
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003443 iov[0].iov_len = total_len - 1 + input_len;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003444 return 0;
3445}
3446
3447void
3448SMB2_query_info_free(struct smb_rqst *rqst)
3449{
Ronnie Sahlberg32a1fb32018-10-24 11:50:33 +10003450 if (rqst && rqst->rq_iov)
3451 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003452}
3453
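/*
 * Common worker for the SMB2_QUERY_INFO based calls below: build the request
 * with SMB2_query_info_init(), send it on a picked channel, then validate
 * the returned buffer and copy it into *data (allocating *data when the
 * caller passes a NULL buffer, in which case the caller must free it).
 */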
3454static int
3455query_info(const unsigned int xid, struct cifs_tcon *tcon,
3456 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3457 u32 additional_info, size_t output_len, size_t min_len, void **data,
3458 u32 *dlen)
3459{
3460 struct smb_rqst rqst;
3461 struct smb2_query_info_rsp *rsp = NULL;
3462 struct kvec iov[1];
3463 struct kvec rsp_iov;
3464 int rc = 0;
Garry McNultyef2298a2018-10-03 20:51:21 +01003465 int resp_buftype = CIFS_NO_BUFFER;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003466 struct cifs_ses *ses = tcon->ses;
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003467 struct TCP_Server_Info *server;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003468 int flags = 0;
Colin Ian King73aaf922019-01-16 16:28:59 +00003469 bool allocated = false;
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003470
3471 cifs_dbg(FYI, "Query Info\n");
3472
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003473 if (!ses)
3474 return -EIO;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003475 server = cifs_pick_channel(ses);
Colin Ian Kingac6ad7a2019-09-02 16:10:59 +01003476 if (!server)
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003477 return -EIO;
3478
3479 if (smb3_encryption_required(tcon))
3480 flags |= CIFS_TRANSFORM_REQ;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003481
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003482 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003483 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003484 rqst.rq_iov = iov;
3485 rqst.rq_nvec = 1;
3486
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003487 rc = SMB2_query_info_init(tcon, server,
3488 &rqst, persistent_fid, volatile_fid,
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003489 info_class, info_type, additional_info,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003490 output_len, 0, NULL);
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003491 if (rc)
3492 goto qinf_exit;
3493
Steve Frenchd42043a2019-02-26 21:58:30 -06003494 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3495 ses->Suid, info_class, (__u32)info_type);
3496
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003497 rc = cifs_send_recv(xid, ses, server,
3498 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003499 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovskye5d04882012-09-19 16:03:26 +04003500
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003501 if (rc) {
3502 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003503 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3504 ses->Suid, info_class, (__u32)info_type, rc);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003505 goto qinf_exit;
3506 }
3507
Steve Frenchd42043a2019-02-26 21:58:30 -06003508 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3509 ses->Suid, info_class, (__u32)info_type);
3510
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003511 if (dlen) {
3512 *dlen = le32_to_cpu(rsp->OutputBufferLength);
3513 if (!*data) {
3514 *data = kmalloc(*dlen, GFP_KERNEL);
3515 if (!*data) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003516 cifs_tcon_dbg(VFS,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003517 "Error %d allocating memory for acl\n",
3518 rc);
3519 *dlen = 0;
Colin Ian King73aaf922019-01-16 16:28:59 +00003520 rc = -ENOMEM;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003521 goto qinf_exit;
3522 }
Colin Ian King73aaf922019-01-16 16:28:59 +00003523 allocated = true;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003524 }
3525 }
3526
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10003527 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3528 le32_to_cpu(rsp->OutputBufferLength),
3529 &rsp_iov, min_len, *data);
Colin Ian King73aaf922019-01-16 16:28:59 +00003530 if (rc && allocated) {
3531 kfree(*data);
3532 *data = NULL;
3533 *dlen = 0;
3534 }
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003535
3536qinf_exit:
Ronnie Sahlberg296ecba2018-08-01 09:26:17 +10003537 SMB2_query_info_free(&rqst);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003538 free_rsp_buf(resp_buftype, rsp);
3539 return rc;
3540}
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003541
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003542int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3543 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003544{
3545 return query_info(xid, tcon, persistent_fid, volatile_fid,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003546 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04003547 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003548 sizeof(struct smb2_file_all_info), (void **)&data,
3549 NULL);
3550}
3551
Steve Frenche0ae8a92021-06-19 16:19:09 -05003552#if 0
3553/* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003554int
Steve Frenchb1bc1872020-06-11 20:23:38 -05003555SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3556 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
3557{
3558	size_t output_len = sizeof(struct smb311_posix_qinfo) +
3559 (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
3560 *plen = 0;
3561
3562 return query_info(xid, tcon, persistent_fid, volatile_fid,
3563 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
3564 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
Steve Frenche0ae8a92021-06-19 16:19:09 -05003565 /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
Steve Frenchb1bc1872020-06-11 20:23:38 -05003566}
Steve Frenche0ae8a92021-06-19 16:19:09 -05003567#endif
Steve Frenchb1bc1872020-06-11 20:23:38 -05003568
3569int
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003570SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003571 u64 persistent_fid, u64 volatile_fid,
Boris Protopopov9541b812020-12-17 20:58:08 +00003572 void **data, u32 *plen, u32 extra_info)
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003573{
Boris Protopopov9541b812020-12-17 20:58:08 +00003574 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
3575 extra_info;
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003576 *plen = 0;
3577
3578 return query_info(xid, tcon, persistent_fid, volatile_fid,
3579 0, SMB2_O_INFO_SECURITY, additional_info,
Shirish Pargaonkaree25c6d2018-06-04 06:46:22 -05003580 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003581}
3582
3583int
3584SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
3585 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
3586{
3587 return query_info(xid, tcon, persistent_fid, volatile_fid,
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003588 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003589 sizeof(struct smb2_file_internal_info),
Shirish Pargaonkar42c493c2017-06-22 22:51:31 -05003590 sizeof(struct smb2_file_internal_info),
3591 (void **)&uniqueid, NULL);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003592}
3593
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003594/*
Steve Frenchc3498182019-09-15 22:38:52 -05003595 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
3596 * See MS-SMB2 2.2.35 and 2.2.36
3597 */
3598
zhengbin388962e2019-09-23 15:06:18 +08003599static int
Steve Frenchc3498182019-09-15 22:38:52 -05003600SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003601 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3602 u64 persistent_fid, u64 volatile_fid,
3603 u32 completion_filter, bool watch_tree)
Steve Frenchc3498182019-09-15 22:38:52 -05003604{
3605 struct smb2_change_notify_req *req;
3606 struct kvec *iov = rqst->rq_iov;
3607 unsigned int total_len;
3608 int rc;
3609
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003610 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
3611 (void **) &req, &total_len);
Steve Frenchc3498182019-09-15 22:38:52 -05003612 if (rc)
3613 return rc;
3614
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003615 req->PersistentFileId = cpu_to_le64(persistent_fid);
3616 req->VolatileFileId = cpu_to_le64(volatile_fid);
Steve Frenchd26c2dd2020-02-06 06:00:14 -06003617 /* See note 354 of MS-SMB2, 64K max */
Steve French52870d52019-10-01 21:25:46 -05003618 req->OutputBufferLength =
3619 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
Steve Frenchc3498182019-09-15 22:38:52 -05003620 req->CompletionFilter = cpu_to_le32(completion_filter);
3621 if (watch_tree)
3622 req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
3623 else
3624 req->Flags = 0;
3625
3626 iov[0].iov_base = (char *)req;
3627 iov[0].iov_len = total_len;
3628
3629 return 0;
3630}
3631
3632int
3633SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
3634 u64 persistent_fid, u64 volatile_fid, bool watch_tree,
3635 u32 completion_filter)
3636{
3637 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003638 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Steve Frenchc3498182019-09-15 22:38:52 -05003639 struct smb_rqst rqst;
3640 struct kvec iov[1];
3641 struct kvec rsp_iov = {NULL, 0};
3642 int resp_buftype = CIFS_NO_BUFFER;
3643 int flags = 0;
3644 int rc = 0;
3645
3646 cifs_dbg(FYI, "change notify\n");
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003647 if (!ses || !server)
Steve Frenchc3498182019-09-15 22:38:52 -05003648 return -EIO;
3649
3650 if (smb3_encryption_required(tcon))
3651 flags |= CIFS_TRANSFORM_REQ;
3652
3653 memset(&rqst, 0, sizeof(struct smb_rqst));
3654 memset(&iov, 0, sizeof(iov));
3655 rqst.rq_iov = iov;
3656 rqst.rq_nvec = 1;
3657
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003658 rc = SMB2_notify_init(xid, &rqst, tcon, server,
3659 persistent_fid, volatile_fid,
Steve Frenchc3498182019-09-15 22:38:52 -05003660 completion_filter, watch_tree);
3661 if (rc)
3662 goto cnotify_exit;
3663
3664 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
3665 (u8)watch_tree, completion_filter);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003666 rc = cifs_send_recv(xid, ses, server,
3667 &rqst, &resp_buftype, flags, &rsp_iov);
Steve Frenchc3498182019-09-15 22:38:52 -05003668
3669 if (rc != 0) {
3670 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
3671 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
3672 (u8)watch_tree, completion_filter, rc);
3673 } else
3674 trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
3675 ses->Suid, (u8)watch_tree, completion_filter);
3676
3677 cnotify_exit:
3678 if (rqst.rq_iov)
3679 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
3680 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3681 return rc;
3682}
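/*
 * Usage sketch for SMB2_change_notify() above (hypothetical caller; "cfile"
 * is an open directory handle). completion_filter is a mask of the
 * FILE_NOTIFY_CHANGE_* bits from MS-SMB2 2.2.35, and passing watch_tree as
 * true asks the server to watch the whole subtree rather than only the
 * directory itself:
 *
 *	rc = SMB2_change_notify(xid, tcon, cfile->fid.persistent_fid,
 *				cfile->fid.volatile_fid, true,
 *				completion_filter);
 */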
3683
3684
3685
3686/*
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003687 * This is a no-op for now. We're not really interested in the reply, but
3688 * rather in the fact that the server sent one and that server->lstrp
3689 * gets updated.
3690 *
3691 * FIXME: maybe we should consider checking that the reply matches the request?
3692 */
3693static void
3694smb2_echo_callback(struct mid_q_entry *mid)
3695{
3696 struct TCP_Server_Info *server = mid->callback_data;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003697 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003698 struct cifs_credits credits = { .value = 0, .instance = 0 };
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003699
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08003700 if (mid->mid_state == MID_RESPONSE_RECEIVED
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003701 || mid->mid_state == MID_RESPONSE_MALFORMED) {
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003702 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003703 credits.instance = server->reconnect_instance;
3704 }
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003705
3706 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08003707 add_credits(server, &credits, CIFS_ECHO_OP);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003708}
3709
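/*
 * A short summary of the flow below: collect every tcon (and IPC tcon) on
 * this server that needs reconnecting, while holding references so they
 * cannot go away, re-establish each one via smb2_reconnect(SMB2_INTERNAL_CMD,
 * ...), reopen any persistent handles on success, drop the references, and
 * re-queue this work after 2 seconds if any tcon still failed to reconnect.
 */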
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003710void smb2_reconnect_server(struct work_struct *work)
3711{
3712 struct TCP_Server_Info *server = container_of(work,
3713 struct TCP_Server_Info, reconnect.work);
3714 struct cifs_ses *ses;
3715 struct cifs_tcon *tcon, *tcon2;
3716 struct list_head tmp_list;
3717 int tcon_exist = false;
Germano Percossi18ea4312017-04-07 12:29:36 +01003718 int rc;
3719 int resched = false;
3720
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003721
3722 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
3723 mutex_lock(&server->reconnect_mutex);
3724
3725 INIT_LIST_HEAD(&tmp_list);
3726 cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
3727
3728 spin_lock(&cifs_tcp_ses_lock);
3729 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3730 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
Pavel Shilovsky96a988f2016-11-29 11:31:23 -08003731 if (tcon->need_reconnect || tcon->need_reopen_files) {
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003732 tcon->tc_count++;
3733 list_add_tail(&tcon->rlist, &tmp_list);
3734 tcon_exist = true;
3735 }
3736 }
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003737 /*
3738 * IPC has the same lifetime as its session and uses its
3739 * refcount.
3740 */
Aurelien Aptelb327a712018-01-24 13:46:10 +01003741 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
3742 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
3743 tcon_exist = true;
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003744 ses->ses_count++;
Aurelien Aptelb327a712018-01-24 13:46:10 +01003745 }
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003746 }
3747 /*
3748 * Get the reference to server struct to be sure that the last call of
3749 * cifs_put_tcon() in the loop below won't release the server pointer.
3750 */
3751 if (tcon_exist)
3752 server->srv_count++;
3753
3754 spin_unlock(&cifs_tcp_ses_lock);
3755
3756 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003757 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
Germano Percossi18ea4312017-04-07 12:29:36 +01003758 if (!rc)
Pavel Shilovsky96a988f2016-11-29 11:31:23 -08003759 cifs_reopen_persistent_handles(tcon);
Germano Percossi18ea4312017-04-07 12:29:36 +01003760 else
3761 resched = true;
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003762 list_del_init(&tcon->rlist);
Ronnie Sahlberg0ff2b012019-06-05 10:15:34 +10003763 if (tcon->ipc)
3764 cifs_put_smb_ses(tcon->ses);
3765 else
3766 cifs_put_tcon(tcon);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003767 }
3768
3769 cifs_dbg(FYI, "Reconnecting tcons finished\n");
Germano Percossi18ea4312017-04-07 12:29:36 +01003770 if (resched)
3771 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003772 mutex_unlock(&server->reconnect_mutex);
3773
3774 /* now we can safely release srv struct */
3775 if (tcon_exist)
3776 cifs_put_tcp_session(server, 1);
3777}
3778
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003779int
3780SMB2_echo(struct TCP_Server_Info *server)
3781{
3782 struct smb2_echo_req *req;
3783 int rc = 0;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003784 struct kvec iov[1];
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003785 struct smb_rqst rqst = { .rq_iov = iov,
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003786 .rq_nvec = 1 };
Ronnie Sahlberg7f7ae752017-11-09 12:14:21 +11003787 unsigned int total_len;
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003788
Joe Perchesf96637b2013-05-04 22:12:25 -05003789 cifs_dbg(FYI, "In echo request\n");
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003790
Steve French4fcd1812016-06-22 20:12:05 -05003791 if (server->tcpStatus == CifsNeedNegotiate) {
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003792 /* No need to send echo on newly established connections */
Stefan Metzmacherb08484d2020-02-24 14:14:59 +01003793 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
Pavel Shilovsky53e0e112016-11-04 11:50:31 -07003794 return rc;
Steve French4fcd1812016-06-22 20:12:05 -05003795 }
3796
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003797 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
3798 (void **)&req, &total_len);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003799 if (rc)
3800 return rc;
3801
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003802 req->hdr.CreditRequest = cpu_to_le16(1);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003803
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003804 iov[0].iov_len = total_len;
3805 iov[0].iov_base = (char *)req;
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003806
Pavel Shilovsky9b7c18a2016-11-16 14:06:17 -08003807 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08003808 server, CIFS_ECHO_OP, NULL);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003809 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003810 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003811
3812 cifs_small_buf_release(req);
3813 return rc;
3814}
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003815
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003816void
3817SMB2_flush_free(struct smb_rqst *rqst)
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003818{
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003819 if (rqst && rqst->rq_iov)
3820 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3821}
3822
3823int
3824SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003825 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3826 u64 persistent_fid, u64 volatile_fid)
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003827{
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003828 struct smb2_flush_req *req;
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003829 struct kvec *iov = rqst->rq_iov;
Ronnie Sahlberg1f444e42017-11-20 11:24:39 +11003830 unsigned int total_len;
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003831 int rc;
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003832
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003833 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
3834 (void **) &req, &total_len);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003835 if (rc)
3836 return rc;
3837
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003838 req->PersistentFileId = cpu_to_le64(persistent_fid);
3839 req->VolatileFileId = cpu_to_le64(volatile_fid);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003840
3841 iov[0].iov_base = (char *)req;
Ronnie Sahlberg1f444e42017-11-20 11:24:39 +11003842 iov[0].iov_len = total_len;
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003843
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003844 return 0;
3845}
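/*
 * SMB2_flush_init() only builds the request into rqst->rq_iov; nothing is
 * sent here. The caller sends the rqst (on its own, as SMB2_flush() does
 * below, or presumably as part of a compound chain) and must release the
 * request buffer with SMB2_flush_free() afterwards.
 */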
3846
3847int
3848SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3849 u64 volatile_fid)
3850{
3851 struct cifs_ses *ses = tcon->ses;
3852 struct smb_rqst rqst;
3853 struct kvec iov[1];
3854 struct kvec rsp_iov = {NULL, 0};
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003855 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003856 int resp_buftype = CIFS_NO_BUFFER;
3857 int flags = 0;
3858 int rc = 0;
3859
3860 cifs_dbg(FYI, "flush\n");
3861 if (!ses || !(ses->server))
3862 return -EIO;
3863
3864 if (smb3_encryption_required(tcon))
3865 flags |= CIFS_TRANSFORM_REQ;
3866
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003867 memset(&rqst, 0, sizeof(struct smb_rqst));
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003868 memset(&iov, 0, sizeof(iov));
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10003869 rqst.rq_iov = iov;
3870 rqst.rq_nvec = 1;
3871
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003872 rc = SMB2_flush_init(xid, &rqst, tcon, server,
3873 persistent_fid, volatile_fid);
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003874 if (rc)
3875 goto flush_exit;
3876
Steve Frenchf90f9792019-09-03 18:35:42 -05003877 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003878 rc = cifs_send_recv(xid, ses, server,
3879 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003880
Steve Frencheccb4422018-05-17 21:16:55 -05003881 if (rc != 0) {
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003882 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
Steve Frencheccb4422018-05-17 21:16:55 -05003883 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
3884 rc);
Steve Frenchf90f9792019-09-03 18:35:42 -05003885 } else
3886 trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
3887 ses->Suid);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003888
Ronnie Sahlberg86e14e12019-07-16 15:07:08 +10003889 flush_exit:
3890 SMB2_flush_free(&rqst);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07003891 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003892 return rc;
3893}
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003894
3895/*
3896 * To form a chain of read requests, any read requests after the first should
3897 * have the end_of_chain boolean set to true.
3898 */
3899static int
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003900smb2_new_read_req(void **buf, unsigned int *total_len,
Long Li2dabfd52017-11-07 01:54:53 -07003901 struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
3902 unsigned int remaining_bytes, int request_type)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003903{
3904 int rc = -EACCES;
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003905 struct smb2_read_req *req = NULL;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003906 struct smb2_hdr *shdr;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003907 struct TCP_Server_Info *server = io_parms->server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003908
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003909 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
3910 (void **) &req, total_len);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003911 if (rc)
3912 return rc;
Long Li2dabfd52017-11-07 01:54:53 -07003913
Long Li2dabfd52017-11-07 01:54:53 -07003914 if (server == NULL)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003915 return -ECONNABORTED;
3916
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003917 shdr = &req->hdr;
3918 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003919
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003920 req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
3921 req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003922 req->ReadChannelInfoOffset = 0; /* reserved */
3923 req->ReadChannelInfoLength = 0; /* reserved */
3924 req->Channel = 0; /* reserved */
3925 req->MinimumCount = 0;
3926 req->Length = cpu_to_le32(io_parms->length);
3927 req->Offset = cpu_to_le64(io_parms->offset);
Steve Frenchd323c2462019-02-25 00:52:43 -06003928
3929 trace_smb3_read_enter(0 /* xid */,
3930 io_parms->persistent_fid,
3931 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
3932 io_parms->offset, io_parms->length);
Long Libd3dcc62017-11-22 17:38:47 -07003933#ifdef CONFIG_CIFS_SMB_DIRECT
3934 /*
3935 * If we want to do an RDMA write, fill in and append
3936 * smbd_buffer_descriptor_v1 to the end of the read request
3937 */
Long Libb4c0412018-04-17 12:17:08 -07003938 if (server->rdma && rdata && !server->sign &&
Long Libd3dcc62017-11-22 17:38:47 -07003939 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003940
Long Libd3dcc62017-11-22 17:38:47 -07003941 struct smbd_buffer_descriptor_v1 *v1;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003942 bool need_invalidate = server->dialect == SMB30_PROT_ID;
Long Libd3dcc62017-11-22 17:38:47 -07003943
3944 rdata->mr = smbd_register_mr(
3945 server->smbd_conn, rdata->pages,
Long Li7cf20bc2018-05-30 12:48:02 -07003946 rdata->nr_pages, rdata->page_offset,
3947 rdata->tailsz, true, need_invalidate);
Long Libd3dcc62017-11-22 17:38:47 -07003948 if (!rdata->mr)
Long Lib7972092019-04-05 21:36:34 +00003949 return -EAGAIN;
Long Libd3dcc62017-11-22 17:38:47 -07003950
3951 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
3952 if (need_invalidate)
3953 req->Channel = SMB2_CHANNEL_RDMA_V1;
3954 req->ReadChannelInfoOffset =
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003955 cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
Long Libd3dcc62017-11-22 17:38:47 -07003956 req->ReadChannelInfoLength =
Steve French2026b062018-01-24 23:07:41 -06003957 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
Long Libd3dcc62017-11-22 17:38:47 -07003958 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
Steve French2026b062018-01-24 23:07:41 -06003959 v1->offset = cpu_to_le64(rdata->mr->mr->iova);
3960 v1->token = cpu_to_le32(rdata->mr->mr->rkey);
3961 v1->length = cpu_to_le32(rdata->mr->mr->length);
Long Libd3dcc62017-11-22 17:38:47 -07003962
3963 *total_len += sizeof(*v1) - 1;
3964 }
3965#endif
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003966 if (request_type & CHAINED_REQUEST) {
3967 if (!(request_type & END_OF_CHAIN)) {
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08003968 /* next 8-byte aligned request */
3969 *total_len = DIV_ROUND_UP(*total_len, 8) * 8;
3970 shdr->NextCommand = cpu_to_le32(*total_len);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003971 } else /* END_OF_CHAIN */
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003972 shdr->NextCommand = 0;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003973 if (request_type & RELATED_REQUEST) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07003974 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003975 /*
3976 * Related requests use info from previous read request
3977 * in chain.
3978 */
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09003979 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
3980 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10003981 req->PersistentFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
3982 req->VolatileFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003983 }
3984 }
3985 if (remaining_bytes > io_parms->length)
3986 req->RemainingBytes = cpu_to_le32(remaining_bytes);
3987 else
3988 req->RemainingBytes = 0;
3989
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08003990 *buf = req;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003991 return rc;
3992}
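/*
 * For RELATED_REQUEST members of a chain, the all-0xFF SessionId, TreeId and
 * FileIds set above follow the SMB2 convention for compounded related
 * requests: they tell the server to reuse the corresponding values from the
 * previous request in the compound.
 */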
3993
3994static void
3995smb2_readv_callback(struct mid_q_entry *mid)
3996{
3997 struct cifs_readdata *rdata = mid->callback_data;
3998 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003999 struct TCP_Server_Info *server = rdata->server;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004000 struct smb2_hdr *shdr =
4001 (struct smb2_hdr *)rdata->iov[0].iov_base;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004002 struct cifs_credits credits = { .value = 0, .instance = 0 };
Steve French46f17d12019-09-04 23:07:52 -05004003 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
4004 .rq_nvec = 1,
Jeff Layton8321fec2012-09-19 06:22:32 -07004005 .rq_pages = rdata->pages,
Long Li1dbe3462018-05-30 12:47:55 -07004006 .rq_offset = rdata->page_offset,
Jeff Layton8321fec2012-09-19 06:22:32 -07004007 .rq_npages = rdata->nr_pages,
4008 .rq_pagesz = rdata->pagesz,
4009 .rq_tailsz = rdata->tailsz };
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004010
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004011 WARN_ONCE(rdata->server != mid->server,
4012 "rdata server %p != mid server %p",
4013 rdata->server, mid->server);
4014
Joe Perchesf96637b2013-05-04 22:12:25 -05004015 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
4016 __func__, mid->mid, mid->mid_state, rdata->result,
4017 rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004018
4019 switch (mid->mid_state) {
4020 case MID_RESPONSE_RECEIVED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004021 credits.value = le16_to_cpu(shdr->CreditRequest);
4022 credits.instance = server->reconnect_instance;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004023 /* result already set, check signature */
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004024 if (server->sign && !mid->decrypted) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07004025 int rc;
4026
Jeff Layton0b688cf2012-09-18 16:20:34 -07004027 rc = smb2_verify_signature(&rqst, server);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07004028 if (rc)
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004029 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05004030 rc);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07004031 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004032 /* FIXME: should this be counted toward the initiating task? */
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04004033 task_io_account_read(rdata->got_bytes);
4034 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004035 break;
4036 case MID_REQUEST_SUBMITTED:
4037 case MID_RETRY_NEEDED:
4038 rdata->result = -EAGAIN;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04004039 if (server->sign && rdata->got_bytes)
4040 /* reset the byte count since we cannot check the signature */
4041 rdata->got_bytes = 0;
4042 /* FIXME: should this be counted toward the initiating task? */
4043 task_io_account_read(rdata->got_bytes);
4044 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004045 break;
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08004046 case MID_RESPONSE_MALFORMED:
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004047 credits.value = le16_to_cpu(shdr->CreditRequest);
4048 credits.instance = server->reconnect_instance;
Miaohe Lin30b5ae22020-08-08 16:36:37 +08004049 fallthrough;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004050 default:
Pavel Shilovsky6b15eb12019-01-18 15:46:14 -08004051 rdata->result = -EIO;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004052 }
Long Libd3dcc62017-11-22 17:38:47 -07004053#ifdef CONFIG_CIFS_SMB_DIRECT
4054 /*
4055 * If this rdata has a memory registered, the MR can be freed.
4056 * The MR needs to be freed as soon as I/O finishes to prevent deadlock,
4057 * because MRs are limited in number and are reused for future I/Os.
4058 */
4059 if (rdata->mr) {
4060 smbd_deregister_mr(rdata->mr);
4061 rdata->mr = NULL;
4062 }
4063#endif
Pavel Shilovsky082aaa82019-01-18 15:54:34 -08004064 if (rdata->result && rdata->result != -ENODATA) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004065 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004066 trace_smb3_read_err(0 /* xid */,
4067 rdata->cfile->fid.persistent_fid,
4068 tcon->tid, tcon->ses->Suid, rdata->offset,
4069 rdata->bytes, rdata->result);
4070 } else
4071 trace_smb3_read_done(0 /* xid */,
4072 rdata->cfile->fid.persistent_fid,
4073 tcon->tid, tcon->ses->Suid,
4074 rdata->offset, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004075
4076 queue_work(cifsiod_wq, &rdata->work);
4077 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004078 add_credits(server, &credits, 0);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004079}
4080
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004081/* smb2_async_readv - send an async read, and set up mid to handle result */
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004082int
4083smb2_async_readv(struct cifs_readdata *rdata)
4084{
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004085 int rc, flags = 0;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004086 char *buf;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004087 struct smb2_hdr *shdr;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004088 struct cifs_io_parms io_parms;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004089 struct smb_rqst rqst = { .rq_iov = rdata->iov,
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004090 .rq_nvec = 1 };
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004091 struct TCP_Server_Info *server;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004092 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004093 unsigned int total_len;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004094
Joe Perchesf96637b2013-05-04 22:12:25 -05004095 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
4096 __func__, rdata->offset, rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004097
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004098 if (!rdata->server)
4099 rdata->server = cifs_pick_channel(tcon->ses);
4100
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004101 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004102 io_parms.server = server = rdata->server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004103 io_parms.offset = rdata->offset;
4104 io_parms.length = rdata->bytes;
4105 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
4106 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
4107 io_parms.pid = rdata->pid;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004108
Long Li2dabfd52017-11-07 01:54:53 -07004109 rc = smb2_new_read_req(
4110 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
Pavel Shilovskyf0b93cb2019-01-25 11:10:00 -08004111 if (rc)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004112 return rc;
4113
Steve French5a77e752018-05-09 17:43:08 -05004114 if (smb3_encryption_required(io_parms.tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004115 flags |= CIFS_TRANSFORM_REQ;
4116
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004117 rdata->iov[0].iov_base = buf;
4118 rdata->iov[0].iov_len = total_len;
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -08004119
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004120 shdr = (struct smb2_hdr *)buf;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004121
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004122 if (rdata->credits.value > 0) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004123 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004124 SMB2_MAX_BUFFER_SIZE));
Aurelien Aptel88fd98a2021-03-04 17:51:48 +00004125 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004126
4127 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4128 if (rc)
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004129 goto async_readv_out;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004130
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004131 flags |= CIFS_HAS_CREDITS;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004132 }
4133
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004134 kref_get(&rdata->refcount);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004135 rc = cifs_call_async(server, &rqst,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004136 cifs_readv_receive, smb2_readv_callback,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08004137 smb3_handle_read_data, rdata, flags,
4138 &rdata->credits);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004139 if (rc) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004140 kref_put(&rdata->refcount, cifs_readdata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004141 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004142 trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
4143 io_parms.tcon->tid,
4144 io_parms.tcon->ses->Suid,
4145 io_parms.offset, io_parms.length, rc);
4146 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004147
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004148async_readv_out:
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004149 cifs_small_buf_release(buf);
4150 return rc;
4151}
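/*
 * Worked example of the credit math above, assuming the usual 64KiB
 * SMB2_MAX_BUFFER_SIZE: a 1MiB async read is charged
 * DIV_ROUND_UP(1048576, 65536) = 16 credits, and the request then asks for
 * CreditRequest = 16 + 8 = 24, i.e. 8 more credits than it consumes.
 */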
Pavel Shilovsky33319142012-09-18 16:20:29 -07004152
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004153int
4154SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4155 unsigned int *nbytes, char **buf, int *buf_type)
4156{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004157 struct smb_rqst rqst;
Colin Ian King1efd4fc2019-07-31 10:05:26 +01004158 int resp_buftype, rc;
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004159 struct smb2_read_req *req = NULL;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004160 struct smb2_read_rsp *rsp = NULL;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004161 struct kvec iov[1];
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004162 struct kvec rsp_iov;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004163 unsigned int total_len;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004164 int flags = CIFS_LOG_ERROR;
4165 struct cifs_ses *ses = io_parms->tcon->ses;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004166
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004167 if (!io_parms->server)
4168 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4169
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004170 *nbytes = 0;
Long Li2dabfd52017-11-07 01:54:53 -07004171 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004172 if (rc)
4173 return rc;
4174
Steve French5a77e752018-05-09 17:43:08 -05004175 if (smb3_encryption_required(io_parms->tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004176 flags |= CIFS_TRANSFORM_REQ;
4177
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004178 iov[0].iov_base = (char *)req;
4179 iov[0].iov_len = total_len;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004180
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004181 memset(&rqst, 0, sizeof(struct smb_rqst));
4182 rqst.rq_iov = iov;
4183 rqst.rq_nvec = 1;
4184
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004185 rc = cifs_send_recv(xid, ses, io_parms->server,
4186 &rqst, &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004187 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004188
4189 if (rc) {
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004190 if (rc != -ENODATA) {
4191 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4192 cifs_dbg(VFS, "Send error in read = %d\n", rc);
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004193 trace_smb3_read_err(xid,
4194 le64_to_cpu(req->PersistentFileId),
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004195 io_parms->tcon->tid, ses->Suid,
4196 io_parms->offset, io_parms->length,
4197 rc);
Steve Frenchb0a42f22019-02-25 15:02:58 -06004198 } else
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004199 trace_smb3_read_done(xid,
4200 le64_to_cpu(req->PersistentFileId),
4201 io_parms->tcon->tid, ses->Suid,
4202 io_parms->offset, 0);
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004203 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Ronnie Sahlberg05fd5c22019-04-23 16:39:45 +10004204 cifs_small_buf_release(req);
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004205 return rc == -ENODATA ? 0 : rc;
Steve Frencheccb4422018-05-17 21:16:55 -05004206 } else
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004207 trace_smb3_read_done(xid,
4208 le64_to_cpu(req->PersistentFileId),
Steve Frencheccb4422018-05-17 21:16:55 -05004209 io_parms->tcon->tid, ses->Suid,
4210 io_parms->offset, io_parms->length);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004211
ZhangXiaoxu088aaf12019-04-06 15:47:39 +08004212 cifs_small_buf_release(req);
4213
Ronnie Sahlberga821df32017-11-21 09:36:33 +11004214 *nbytes = le32_to_cpu(rsp->DataLength);
4215 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4216 (*nbytes > io_parms->length)) {
4217 cifs_dbg(FYI, "bad length %d for count %d\n",
4218 *nbytes, io_parms->length);
4219 rc = -EIO;
4220 *nbytes = 0;
4221 }
4222
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004223 if (*buf) {
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004224 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004225 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004226 } else if (resp_buftype != CIFS_NO_BUFFER) {
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004227 *buf = rsp_iov.iov_base;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004228 if (resp_buftype == CIFS_SMALL_BUFFER)
4229 *buf_type = CIFS_SMALL_BUFFER;
4230 else if (resp_buftype == CIFS_LARGE_BUFFER)
4231 *buf_type = CIFS_LARGE_BUFFER;
4232 }
4233 return rc;
4234}
4235
Pavel Shilovsky33319142012-09-18 16:20:29 -07004236/*
4237 * Check the mid_state and signature on received buffer (if any), and queue the
4238 * workqueue completion task.
4239 */
4240static void
4241smb2_writev_callback(struct mid_q_entry *mid)
4242{
4243 struct cifs_writedata *wdata = mid->callback_data;
4244 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004245 struct TCP_Server_Info *server = wdata->server;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004246 unsigned int written;
4247 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004248 struct cifs_credits credits = { .value = 0, .instance = 0 };
Pavel Shilovsky33319142012-09-18 16:20:29 -07004249
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004250 WARN_ONCE(wdata->server != mid->server,
4251 "wdata server %p != mid server %p",
4252 wdata->server, mid->server);
4253
Pavel Shilovsky33319142012-09-18 16:20:29 -07004254 switch (mid->mid_state) {
4255 case MID_RESPONSE_RECEIVED:
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004256 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004257 credits.instance = server->reconnect_instance;
4258 wdata->result = smb2_check_receive(mid, server, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004259 if (wdata->result != 0)
4260 break;
4261
4262 written = le32_to_cpu(rsp->DataLength);
4263 /*
4264 * Mask off high 16 bits when bytes written as returned
4265 * by the server is greater than bytes requested by the
4266 * client. OS/2 servers are known to set incorrect
4267 * CountHigh values.
4268 */
4269 if (written > wdata->bytes)
4270 written &= 0xFFFF;
4271
4272 if (written < wdata->bytes)
4273 wdata->result = -ENOSPC;
4274 else
4275 wdata->bytes = written;
4276 break;
4277 case MID_REQUEST_SUBMITTED:
4278 case MID_RETRY_NEEDED:
4279 wdata->result = -EAGAIN;
4280 break;
Pavel Shilovsky0fd1d372019-01-15 15:08:48 -08004281 case MID_RESPONSE_MALFORMED:
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004282 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004283 credits.instance = server->reconnect_instance;
Miaohe Lin30b5ae22020-08-08 16:36:37 +08004284 fallthrough;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004285 default:
4286 wdata->result = -EIO;
4287 break;
4288 }
Long Lidb223a52017-11-22 17:38:45 -07004289#ifdef CONFIG_CIFS_SMB_DIRECT
4290 /*
4291 * If this wdata has a memory registered, the MR can be freed
4292 * The number of MRs available is limited, so it is important to recover
4293 * a used MR as soon as the I/O is finished. Holding the MR into later
4294 * stages of I/O can result in deadlock when no MR is available to send
4295 * the request on an I/O retry.
4296 */
4297 if (wdata->mr) {
4298 smbd_deregister_mr(wdata->mr);
4299 wdata->mr = NULL;
4300 }
4301#endif
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004302 if (wdata->result) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07004303 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004304 trace_smb3_write_err(0 /* no xid */,
4305 wdata->cfile->fid.persistent_fid,
4306 tcon->tid, tcon->ses->Suid, wdata->offset,
4307 wdata->bytes, wdata->result);
Steve Frenchd6fd4192020-02-05 16:52:11 -06004308 if (wdata->result == -ENOSPC)
Joe Perchesa0a30362020-04-14 22:42:53 -07004309 pr_warn_once("Out of space writing to %s\n",
4310 tcon->treeName);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004311 } else
4312 trace_smb3_write_done(0 /* no xid */,
4313 wdata->cfile->fid.persistent_fid,
4314 tcon->tid, tcon->ses->Suid,
4315 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004316
4317 queue_work(cifsiod_wq, &wdata->work);
4318 DeleteMidQEntry(mid);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08004319 add_credits(server, &credits, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004320}
4321
4322/* smb2_async_writev - send an async write, and set up mid to handle result */
4323int
Steve French4a5c80d2014-02-07 20:45:12 -06004324smb2_async_writev(struct cifs_writedata *wdata,
4325 void (*release)(struct kref *kref))
Pavel Shilovsky33319142012-09-18 16:20:29 -07004326{
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004327 int rc = -EACCES, flags = 0;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004328 struct smb2_write_req *req = NULL;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004329 struct smb2_hdr *shdr;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004330 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004331 struct TCP_Server_Info *server = wdata->server;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004332 struct kvec iov[1];
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004333 struct smb_rqst rqst = { };
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004334 unsigned int total_len;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004335
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004336 if (!wdata->server)
4337 server = wdata->server = cifs_pick_channel(tcon->ses);
4338
4339 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
4340 (void **) &req, &total_len);
Pavel Shilovskyf0b93cb2019-01-25 11:10:00 -08004341 if (rc)
4342 return rc;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004343
Steve French5a77e752018-05-09 17:43:08 -05004344 if (smb3_encryption_required(tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004345 flags |= CIFS_TRANSFORM_REQ;
4346
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004347 shdr = (struct smb2_hdr *)req;
4348 shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004349
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004350 req->PersistentFileId = cpu_to_le64(wdata->cfile->fid.persistent_fid);
4351 req->VolatileFileId = cpu_to_le64(wdata->cfile->fid.volatile_fid);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004352 req->WriteChannelInfoOffset = 0;
4353 req->WriteChannelInfoLength = 0;
4354 req->Channel = 0;
4355 req->Offset = cpu_to_le64(wdata->offset);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004356 req->DataOffset = cpu_to_le16(
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004357 offsetof(struct smb2_write_req, Buffer));
Pavel Shilovsky33319142012-09-18 16:20:29 -07004358 req->RemainingBytes = 0;
Steve Frenchd323c2462019-02-25 00:52:43 -06004359
4360 trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
4361 tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
Long Lidb223a52017-11-22 17:38:45 -07004362#ifdef CONFIG_CIFS_SMB_DIRECT
4363 /*
4364 * If we want to do a server RDMA read, fill in and append
4365 * smbd_buffer_descriptor_v1 to the end of the write request
4366 */
Long Libb4c0412018-04-17 12:17:08 -07004367 if (server->rdma && !server->sign && wdata->bytes >=
Long Lidb223a52017-11-22 17:38:45 -07004368 server->smbd_conn->rdma_readwrite_threshold) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07004369
Long Lidb223a52017-11-22 17:38:45 -07004370 struct smbd_buffer_descriptor_v1 *v1;
4371 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4372
4373 wdata->mr = smbd_register_mr(
4374 server->smbd_conn, wdata->pages,
Long Li7cf20bc2018-05-30 12:48:02 -07004375 wdata->nr_pages, wdata->page_offset,
4376 wdata->tailsz, false, need_invalidate);
Long Lidb223a52017-11-22 17:38:45 -07004377 if (!wdata->mr) {
Long Lib7972092019-04-05 21:36:34 +00004378 rc = -EAGAIN;
Long Lidb223a52017-11-22 17:38:45 -07004379 goto async_writev_out;
4380 }
4381 req->Length = 0;
4382 req->DataOffset = 0;
Long Li7cf20bc2018-05-30 12:48:02 -07004383 if (wdata->nr_pages > 1)
4384 req->RemainingBytes =
4385 cpu_to_le32(
4386 (wdata->nr_pages - 1) * wdata->pagesz -
4387 wdata->page_offset + wdata->tailsz
4388 );
4389 else
4390 req->RemainingBytes = cpu_to_le32(wdata->tailsz);
Long Lidb223a52017-11-22 17:38:45 -07004391 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4392 if (need_invalidate)
4393 req->Channel = SMB2_CHANNEL_RDMA_V1;
4394 req->WriteChannelInfoOffset =
Steve French2026b062018-01-24 23:07:41 -06004395 cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
Long Lidb223a52017-11-22 17:38:45 -07004396 req->WriteChannelInfoLength =
Steve French2026b062018-01-24 23:07:41 -06004397 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
Long Lidb223a52017-11-22 17:38:45 -07004398 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
Steve French2026b062018-01-24 23:07:41 -06004399 v1->offset = cpu_to_le64(wdata->mr->mr->iova);
4400 v1->token = cpu_to_le32(wdata->mr->mr->rkey);
4401 v1->length = cpu_to_le32(wdata->mr->mr->length);
Long Lidb223a52017-11-22 17:38:45 -07004402 }
4403#endif
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004404 iov[0].iov_len = total_len - 1;
4405 iov[0].iov_base = (char *)req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07004406
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08004407 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004408 rqst.rq_nvec = 1;
Jeff Laytoneddb0792012-09-18 16:20:35 -07004409 rqst.rq_pages = wdata->pages;
Long Li57a929a2018-05-30 12:47:53 -07004410 rqst.rq_offset = wdata->page_offset;
Jeff Laytoneddb0792012-09-18 16:20:35 -07004411 rqst.rq_npages = wdata->nr_pages;
4412 rqst.rq_pagesz = wdata->pagesz;
4413 rqst.rq_tailsz = wdata->tailsz;
Long Lidb223a52017-11-22 17:38:45 -07004414#ifdef CONFIG_CIFS_SMB_DIRECT
4415 if (wdata->mr) {
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004416 iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
Long Lidb223a52017-11-22 17:38:45 -07004417 rqst.rq_npages = 0;
4418 }
4419#endif
Joe Perchesf96637b2013-05-04 22:12:25 -05004420 cifs_dbg(FYI, "async write at %llu %u bytes\n",
4421 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004422
Long Lidb223a52017-11-22 17:38:45 -07004423#ifdef CONFIG_CIFS_SMB_DIRECT
4424 /* For RDMA read, I/O size is in RemainingBytes not in Length */
4425 if (!wdata->mr)
4426 req->Length = cpu_to_le32(wdata->bytes);
4427#else
Pavel Shilovsky33319142012-09-18 16:20:29 -07004428 req->Length = cpu_to_le32(wdata->bytes);
Long Lidb223a52017-11-22 17:38:45 -07004429#endif
Pavel Shilovsky33319142012-09-18 16:20:29 -07004430
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004431 if (wdata->credits.value > 0) {
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07004432 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004433 SMB2_MAX_BUFFER_SIZE));
Aurelien Aptel88fd98a2021-03-04 17:51:48 +00004434 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004435
4436 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
4437 if (rc)
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004438 goto async_writev_out;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004439
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004440 flags |= CIFS_HAS_CREDITS;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004441 }
4442
Pavel Shilovsky33319142012-09-18 16:20:29 -07004443 kref_get(&wdata->refcount);
Pavel Shilovsky9b7c18a2016-11-16 14:06:17 -08004444 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
Pavel Shilovsky3349c3a2019-01-15 15:52:29 -08004445 wdata, flags, &wdata->credits);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004446
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004447 if (rc) {
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004448 trace_smb3_write_err(0 /* no xid */,
4449 le64_to_cpu(req->PersistentFileId),
Steve Frencheccb4422018-05-17 21:16:55 -05004450 tcon->tid, tcon->ses->Suid, wdata->offset,
4451 wdata->bytes, rc);
Steve French4a5c80d2014-02-07 20:45:12 -06004452 kref_put(&wdata->refcount, release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004453 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
Pavel Shilovsky7d42e722019-01-25 11:38:53 -08004454 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07004455
Pavel Shilovsky33319142012-09-18 16:20:29 -07004456async_writev_out:
4457 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07004458 return rc;
4459}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004460
4461/*
4462 * SMB2_write() is passed an iov pointer to a kvec array of n_vec elements.
4463 * n_vec must be at least 1, and the kvecs holding the data to write start at
4464 * position 1 in the iov array (iov[0] is filled in here with the request
4465 * header). The total number of data bytes is given by io_parms->length.
4466 */
4467int
4468SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
4469 unsigned int *nbytes, struct kvec *iov, int n_vec)
4470{
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004471 struct smb_rqst rqst;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004472 int rc = 0;
4473 struct smb2_write_req *req = NULL;
4474 struct smb2_write_rsp *rsp = NULL;
4475 int resp_buftype;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004476 struct kvec rsp_iov;
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004477 int flags = 0;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004478 unsigned int total_len;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004479 struct TCP_Server_Info *server;
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004480
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004481 *nbytes = 0;
4482
4483 if (n_vec < 1)
4484 return rc;
4485
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004486 if (!io_parms->server)
4487 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4488 server = io_parms->server;
4489 if (server == NULL)
4490 return -ECONNABORTED;
4491
4492 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
4493 (void **) &req, &total_len);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004494 if (rc)
4495 return rc;
4496
Steve French5a77e752018-05-09 17:43:08 -05004497 if (smb3_encryption_required(io_parms->tcon))
Pavel Shilovsky7fb89862016-10-31 13:49:30 -07004498 flags |= CIFS_TRANSFORM_REQ;
4499
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004500 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004501
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004502 req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
4503 req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004504 req->WriteChannelInfoOffset = 0;
4505 req->WriteChannelInfoLength = 0;
4506 req->Channel = 0;
4507 req->Length = cpu_to_le32(io_parms->length);
4508 req->Offset = cpu_to_le64(io_parms->offset);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004509 req->DataOffset = cpu_to_le16(
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004510 offsetof(struct smb2_write_req, Buffer));
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004511 req->RemainingBytes = 0;
4512
Steve Frenchd323c2462019-02-25 00:52:43 -06004513 trace_smb3_write_enter(xid, io_parms->persistent_fid,
4514 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4515 io_parms->offset, io_parms->length);
4516
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004517 iov[0].iov_base = (char *)req;
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004518 /* 1 for Buffer */
4519 iov[0].iov_len = total_len - 1;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004520
Ronnie Sahlberg40eff452018-06-12 08:00:59 +10004521 memset(&rqst, 0, sizeof(struct smb_rqst));
4522 rqst.rq_iov = iov;
4523 rqst.rq_nvec = n_vec + 1;
4524
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004525 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
4526 &rqst,
Ronnie Sahlbergf5688a62017-11-20 11:24:41 +11004527 &resp_buftype, flags, &rsp_iov);
Pavel Shilovskyda502f72016-10-25 11:38:47 -07004528 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004529
4530 if (rc) {
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004531 trace_smb3_write_err(xid,
4532 le64_to_cpu(req->PersistentFileId),
Steve Frencheccb4422018-05-17 21:16:55 -05004533 io_parms->tcon->tid,
4534 io_parms->tcon->ses->Suid,
4535 io_parms->offset, io_parms->length, rc);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004536 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05004537 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Steve Frencheccb4422018-05-17 21:16:55 -05004538 } else {
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004539 *nbytes = le32_to_cpu(rsp->DataLength);
Ronnie Sahlbergd8d9de52021-09-08 12:10:14 +10004540 trace_smb3_write_done(xid,
4541 le64_to_cpu(req->PersistentFileId),
4542 io_parms->tcon->tid,
4543 io_parms->tcon->ses->Suid,
4544 io_parms->offset, *nbytes);
Steve Frencheccb4422018-05-17 21:16:55 -05004545 }
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004546
ZhangXiaoxu6a3eb332019-04-06 15:47:38 +08004547 cifs_small_buf_release(req);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04004548 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004549 return rc;
4550}
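/*
 * Rough (hypothetical) caller sketch for SMB2_write() above: io_parms is
 * assumed to already carry the tcon, persistent/volatile fids, offset and
 * pid. iov[0] is left free for the request header; the data kvecs start at
 * iov[1]:
 *
 *	struct kvec iov[2];
 *	unsigned int written = 0;
 *
 *	iov[1].iov_base = buf;
 *	iov[1].iov_len = len;
 *	io_parms.length = len;
 *	rc = SMB2_write(xid, &io_parms, &written, iov, 1);
 */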
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004551
Aurelien Aptel69dda302020-03-02 17:53:22 +01004552int posix_info_sid_size(const void *beg, const void *end)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004553{
4554 size_t subauth;
4555 int total;
4556
4557 if (beg + 1 > end)
4558 return -1;
4559
4560 subauth = *(u8 *)(beg+1);
4561 if (subauth < 1 || subauth > 15)
4562 return -1;
4563
4564 total = 1 + 1 + 6 + 4*subauth;
4565 if (beg + total > end)
4566 return -1;
4567
4568 return total;
4569}
4570
4571int posix_info_parse(const void *beg, const void *end,
4572 struct smb2_posix_info_parsed *out)
4573
4574{
4575 int total_len = 0;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004576 int owner_len, group_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004577 int name_len;
4578 const void *owner_sid;
4579 const void *group_sid;
4580 const void *name;
4581
4582 /* if no end bound given, assume payload to be correct */
4583 if (!end) {
4584 const struct smb2_posix_info *p = beg;
4585
4586 end = beg + le32_to_cpu(p->NextEntryOffset);
4587 /* last element will have a 0 offset, pick a sensible bound */
4588 if (end == beg)
4589 end += 0xFFFF;
4590 }
4591
4592 /* check base buf */
4593 if (beg + sizeof(struct smb2_posix_info) > end)
4594 return -1;
4595 total_len = sizeof(struct smb2_posix_info);
4596
4597 /* check owner sid */
4598 owner_sid = beg + total_len;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004599 owner_len = posix_info_sid_size(owner_sid, end);
4600 if (owner_len < 0)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004601 return -1;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004602 total_len += owner_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004603
4604 /* check group sid */
4605 group_sid = beg + total_len;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004606 group_len = posix_info_sid_size(group_sid, end);
4607 if (group_len < 0)
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004608 return -1;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004609 total_len += group_len;
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004610
4611 /* check name len */
4612 if (beg + total_len + 4 > end)
4613 return -1;
4614 name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
4615 if (name_len < 1 || name_len > 0xFFFF)
4616 return -1;
4617 total_len += 4;
4618
4619 /* check name */
4620 name = beg + total_len;
4621 if (name + name_len > end)
4622 return -1;
4623 total_len += name_len;
4624
4625 if (out) {
4626 out->base = beg;
4627 out->size = total_len;
4628 out->name_len = name_len;
4629 out->name = name;
Ronnie Sahlbergca38fab2021-06-18 10:58:30 +10004630 memcpy(&out->owner, owner_sid, owner_len);
4631 memcpy(&out->group, group_sid, group_len);
Aurelien Aptel349e13a2020-02-08 15:50:57 +01004632 }
4633 return total_len;
4634}
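/*
 * Wire layout consumed by posix_info_parse() above (all little-endian):
 *
 *	struct smb2_posix_info	fixed-size header, starting with NextEntryOffset
 *	owner SID		standard SID encoding: 1 byte revision, 1 byte
 *				subauthority count (1-15), 6 bytes authority,
 *				4 bytes per subauthority
 *	group SID		same encoding as the owner SID
 *	__le32 name_len		1 to 0xFFFF
 *	name			name_len bytes
 *
 * posix_info_sid_size() returns the encoded length of one such SID, or -1 if
 * it would run past the end of the buffer.
 */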
4635
4636static int posix_info_extra_size(const void *beg, const void *end)
4637{
4638 int len = posix_info_parse(beg, end, NULL);
4639
4640 if (len < 0)
4641 return -1;
4642 return len - sizeof(struct smb2_posix_info);
4643}
4644
static unsigned int
num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
	    size_t size)
{
	int len;
	unsigned int entrycount = 0;
	unsigned int next_offset = 0;
	char *entryptr;
	FILE_DIRECTORY_INFO *dir_info;

	if (bufstart == NULL)
		return 0;

	entryptr = bufstart;

	while (1) {
		if (entryptr + next_offset < entryptr ||
		    entryptr + next_offset > end_of_buf ||
		    entryptr + next_offset + size > end_of_buf) {
			cifs_dbg(VFS, "malformed search entry would overflow\n");
			break;
		}

		entryptr = entryptr + next_offset;
		dir_info = (FILE_DIRECTORY_INFO *)entryptr;

		if (infotype == SMB_FIND_FILE_POSIX_INFO)
			len = posix_info_extra_size(entryptr, end_of_buf);
		else
			len = le32_to_cpu(dir_info->FileNameLength);

		if (len < 0 ||
		    entryptr + len < entryptr ||
		    entryptr + len > end_of_buf ||
		    entryptr + len + size > end_of_buf) {
			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
				 end_of_buf);
			break;
		}

		*lastentry = entryptr;
		entrycount++;

		next_offset = le32_to_cpu(dir_info->NextEntryOffset);
		if (!next_offset)
			break;
	}

	return entrycount;
}

/*
 * Readdir/FindFirst
 */
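/*
 * Build (but do not send) an SMB2 QUERY_DIRECTORY request for an open
 * directory handle.  The request header and the "*" search pattern are
 * placed in rqst->rq_iov; the caller sends the request (directly or as
 * part of a compound) and releases it with SMB2_query_directory_free().
 */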
int SMB2_query_directory_init(const unsigned int xid,
			      struct cifs_tcon *tcon,
			      struct TCP_Server_Info *server,
			      struct smb_rqst *rqst,
			      u64 persistent_fid, u64 volatile_fid,
			      int index, int info_level)
{
	struct smb2_query_directory_req *req;
	unsigned char *bufptr;
	__le16 asterisk = cpu_to_le16('*');
	unsigned int output_size = CIFSMaxBufSize -
		MAX_SMB2_CREATE_RESPONSE_SIZE -
		MAX_SMB2_CLOSE_RESPONSE_SIZE;
	unsigned int total_len;
	struct kvec *iov = rqst->rq_iov;
	int len, rc;

	rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	switch (info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		break;
	case SMB_FIND_FILE_POSIX_INFO:
		req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
		break;
	default:
		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
			      info_level);
		return -EINVAL;
	}

	req->FileIndex = cpu_to_le32(index);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	len = 0x2;
	bufptr = req->Buffer;
	memcpy(bufptr, &asterisk, len);

	req->FileNameOffset =
		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
	req->FileNameLength = cpu_to_le16(len);
	/*
	 * BB could be 30 bytes or so longer if we used SMB2 specific
	 * buffer lengths, but this is safe and close enough.
	 */
	output_size = min_t(unsigned int, output_size, server->maxBuf);
	output_size = min_t(unsigned int, output_size, 2 << 15);
	req->OutputBufferLength = cpu_to_le32(output_size);

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	iov[1].iov_base = (char *)(req->Buffer);
	iov[1].iov_len = len;

	trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
				   tcon->ses->Suid, index, output_size);

	return 0;
}

void SMB2_query_directory_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov) {
		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
	}
}

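/*
 * Parse a QUERY_DIRECTORY response into the caller's cifs_search_info:
 * validate the output buffer, take ownership of the response buffer
 * (ntwrk_buf_start) and count the directory entries it contains.
 */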
int
smb2_parse_query_directory(struct cifs_tcon *tcon,
			   struct kvec *rsp_iov,
			   int resp_buftype,
			   struct cifs_search_info *srch_inf)
{
	struct smb2_query_directory_rsp *rsp;
	size_t info_buf_size;
	char *end_of_smb;
	int rc;

	rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;

	switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	case SMB_FIND_FILE_POSIX_INFO:
		/* note that posix payloads are variable size */
		info_buf_size = sizeof(struct smb2_posix_info);
		break;
	default:
		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
			      srch_inf->info_level);
		return -EINVAL;
	}

	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
			       info_buf_size);
	if (rc) {
		cifs_tcon_dbg(VFS, "bad info payload\n");
		return rc;
	}

	srch_inf->unicode = true;

	if (srch_inf->ntwrk_buf_start) {
		if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
	srch_inf->ntwrk_buf_start = (char *)rsp;
	srch_inf->srch_entries_start = srch_inf->last_entry =
		(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
	end_of_smb = rsp_iov->iov_len + (char *)rsp;

	srch_inf->entries_in_buffer = num_entries(
		srch_inf->info_level,
		srch_inf->srch_entries_start,
		end_of_smb,
		&srch_inf->last_entry,
		info_buf_size);

	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
		 srch_inf->srch_entries_start, srch_inf->last_entry);
	if (resp_buftype == CIFS_LARGE_BUFFER)
		srch_inf->smallBuf = false;
	else if (resp_buftype == CIFS_SMALL_BUFFER)
		srch_inf->smallBuf = true;
	else
		cifs_tcon_dbg(VFS, "Invalid search buffer type\n");

	return 0;
}

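/*
 * Synchronous QUERY_DIRECTORY: build the request, send it on a channel
 * picked for this session, and parse the entries into srch_inf.  A
 * STATUS_NO_MORE_FILES reply is not treated as an error; it simply
 * marks the end of the search.
 */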
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb_rqst rqst;
	struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	struct smb2_query_directory_rsp *rsp = NULL;
	int resp_buftype = CIFS_NO_BUFFER;
	struct kvec rsp_iov;
	int rc = 0;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = 0;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	rc = SMB2_query_directory_init(xid, tcon, server,
				       &rqst, persistent_fid,
				       volatile_fid, index,
				       srch_inf->info_level);
	if (rc)
		goto qdir_exit;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;

	if (rc) {
		if (rc == -ENODATA &&
		    rsp->hdr.Status == STATUS_NO_MORE_FILES) {
			trace_smb3_query_dir_done(xid, persistent_fid,
				tcon->tid, tcon->ses->Suid, index, 0);
			srch_inf->endOfSearch = true;
			rc = 0;
		} else {
			trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
				tcon->ses->Suid, index, 0, rc);
			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		}
		goto qdir_exit;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov,	resp_buftype,
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
			tcon->ses->Suid, index, 0, rc);
		goto qdir_exit;
	}
	resp_buftype = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
			tcon->ses->Suid, index, srch_inf->entries_in_buffer);

qdir_exit:
	SMB2_query_directory_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

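/*
 * Build (but do not send) an SMB2 SET_INFO request.  The first data
 * buffer is copied inline after the request header; any additional
 * buffers are chained as extra iovecs and only accounted for in
 * BufferLength.  Free with SMB2_set_info_free().
 */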
int
SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
		   struct smb_rqst *rqst,
		   u64 persistent_fid, u64 volatile_fid, u32 pid,
		   u8 info_class, u8 info_type, u32 additional_info,
		   void **data, unsigned int *size)
{
	struct smb2_set_info_req *req;
	struct kvec *iov = rqst->rq_iov;
	unsigned int i, total_len;
	int rc;

	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->InfoType = info_type;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	req->AdditionalInformation = cpu_to_le32(additional_info);

	req->BufferOffset =
		cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
	req->BufferLength = cpu_to_le32(*size);

	memcpy(req->Buffer, *data, *size);
	total_len += *size;

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	for (i = 1; i < rqst->rq_nvec; i++) {
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	return 0;
}

void
SMB2_set_info_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov)
		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
}

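/*
 * Helper that wraps SMB2_set_info_init()/cifs_send_recv() for the
 * simple SET_INFO callers below (end-of-file, ACL, EA).  "num" is the
 * number of data buffers to send.
 */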
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
	      u8 info_type, u32 additional_info, unsigned int num,
	      void **data, unsigned int *size)
{
	struct smb_rqst rqst;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = 0;

	if (!ses || !server)
		return -EIO;

	if (!num)
		return -EINVAL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = num;

	rc = SMB2_set_info_init(tcon, server,
				&rqst, persistent_fid, volatile_fid, pid,
				info_class, info_type, additional_info,
				data, size);
	if (rc) {
		kfree(iov);
		return rc;
	}

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags,
			    &rsp_iov);
	SMB2_set_info_free(&rqst);
	rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
		trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
				ses->Suid, info_class, (__u32)info_type, rc);
	}

	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}

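/*
 * Set the end-of-file (size) of an open file.  A minimal usage sketch,
 * assuming the caller already holds an open cifsFileInfo for the file
 * (the cfile/new_size names below are illustrative, not from this file):
 *
 *	__le64 eof = cpu_to_le64(new_size);
 *	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
 *			  cfile->fid.volatile_fid, cfile->pid, &eof);
 */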
int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, __le64 *eof)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, &data, &size);
}

int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
			1, (void **)&pnntsd, &pacllen);
}

int
SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid,
	    struct smb2_file_full_ea_info *buf, int len)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, (void **)&buf, &len);
}

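/*
 * Acknowledge an oplock break from the server by sending the new oplock
 * level back on the given handle.  No response buffer is expected.
 */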
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_oplock_break *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	return rc;
}

void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			     struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree  = kst->f_bavail =
			  le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}

static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
			struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
	kst->f_bfree =  le64_to_cpu(response_data->BlocksAvail);
	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
		kst->f_bavail = kst->f_bfree;
	else
		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
	if (response_data->TotalFileNodes != cpu_to_le64(-1))
		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
	if (response_data->FreeFileNodes != cpu_to_le64(-1))
		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);

	return;
}

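/*
 * Build a QUERY_INFO request of type SMB2_O_INFO_FILESYSTEM for the
 * given info level into a single kvec.  Callers send the request and
 * release the small buffer with cifs_small_buf_release().
 */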
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
		   struct TCP_Server_Info *server,
		   int level, int outbuf_len, u64 persistent_fid,
		   u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;
	unsigned int total_len;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || server == NULL)
		return -EIO;

	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 1 for pad */
	req->InputBufferOffset =
			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);

	iov->iov_base = (char *)req;
	iov->iov_len = total_len;
	return 0;
}

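/*
 * SMB3.1.1 POSIX extensions: query FS_POSIX_INFORMATION and convert the
 * result into the caller's struct kstatfs.
 */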
int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	FILE_SYSTEM_POSIX_INFO *info = NULL;
	int flags = 0;

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_POSIX_INFORMATION,
				sizeof(FILE_SYSTEM_POSIX_INFO),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto posix_qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (FILE_SYSTEM_POSIX_INFO *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(FILE_SYSTEM_POSIX_INFO));
	if (!rc)
		copy_posix_fs_info_to_kstatfs(info, fsdata);

posix_qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

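/*
 * Query FS_FULL_SIZE_INFORMATION for statfs(2): block size and the
 * total/available allocation units are copied into the caller's kstatfs.
 */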
int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	struct smb2_fs_full_size_info *info = NULL;
	int flags = 0;

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, fsdata);

qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

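/*
 * Query one of several filesystem info levels (attribute, device,
 * sector size or volume information) and cache the result in the tcon.
 */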
int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	unsigned int rsp_len, offset;
	int flags = 0;

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else if (level == FS_VOLUME_INFORMATION) {
		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
		min_len = sizeof(struct smb3_fs_vol_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, server,
				level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, offset
			+ (char *)rsp, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, offset
			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(offset + (char *)rsp);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	} else if (level == FS_VOLUME_INFORMATION) {
		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
			(offset + (char *)rsp);
		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
		tcon->vol_create_time = vol_info->VolumeCreationTime;
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

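/*
 * Send an SMB2 LOCK request carrying an array of byte-range lock
 * elements for the given open file.  No response buffer is expected.
 */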
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	struct smb_rqst rqst;
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	struct kvec rsp_iov;
	int resp_buf_type;
	unsigned int count;
	int flags = CIFS_NO_RSP_BUF;
	unsigned int total_len;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;

	rc = cifs_send_recv(xid, tcon->ses, server,
			    &rqst, &resp_buf_type, flags,
			    &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
				    tcon->ses->Suid, rc);
	}

	return rc;
}

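/*
 * Convenience wrapper around smb2_lockv() for a single byte-range lock.
 * When "wait" is false and the request is not an unlock, the
 * FAIL_IMMEDIATELY flag is set so the server will not block.
 *
 * Illustrative call (the fid variable is an assumption, not from this
 * file), taking a shared lock on the first 4096 bytes of a file:
 *
 *	rc = SMB2_lock(xid, tcon, fid.persistent_fid, fid.volatile_fid,
 *		       current->tgid, 4096, 0, SMB2_LOCKFLAG_SHARED_LOCK,
 *		       false);
 */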
int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

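/*
 * Acknowledge a lease break: echo the lease key back to the server along
 * with the lease state the client is prepared to keep.  No response
 * buffer is expected.
 */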
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_lease_ack *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	__u64 *please_key_high;
	__u64 *please_key_low;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	total_len += 12;

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	please_key_low = (__u64 *)lease_key;
	please_key_high = (__u64 *)(lease_key+8);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
			ses->Suid, *please_key_low, *please_key_high, rc);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	} else
		trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
			ses->Suid, *please_key_low, *please_key_high);

	return rc;
}