// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2011
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 */
#include <linux/ctype.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "nterr.h"

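/*
 * Basic sanity check of a single response header: verify that this really is
 * an SMB2 response (or an oplock break, the one case where the server sends
 * us a request) and that the message id matches the one we sent.
 * Returns 0 if the header looks sane, 1 otherwise.
 */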
static int
check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid)
{
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	/*
	 * Make sure that this really is an SMB, that it is a response,
	 * and that the message ids match.
	 */
	if ((shdr->ProtocolId == SMB2_PROTO_NUMBER) &&
	    (mid == wire_mid)) {
		if (shdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
			return 0;
		else {
			/* only one valid case where server sends us request */
			if (shdr->Command == SMB2_OPLOCK_BREAK)
				return 0;
			else
				cifs_dbg(VFS, "Received Request not response\n");
		}
	} else { /* bad signature or mid */
		if (shdr->ProtocolId != SMB2_PROTO_NUMBER)
			cifs_dbg(VFS, "Bad protocol string signature header %x\n",
				 le32_to_cpu(shdr->ProtocolId));
		if (mid != wire_mid)
			cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
				 mid, wire_mid);
	}
	cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
	return 1;
}

/*
 * The following table defines the expected "StructureSize" of SMB2 responses
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS responses.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order
 */
static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ cpu_to_le16(65),
	/* SMB2_SESSION_SETUP */ cpu_to_le16(9),
	/* SMB2_LOGOFF */ cpu_to_le16(4),
	/* SMB2_TREE_CONNECT */ cpu_to_le16(16),
	/* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
	/* SMB2_CREATE */ cpu_to_le16(89),
	/* SMB2_CLOSE */ cpu_to_le16(60),
	/* SMB2_FLUSH */ cpu_to_le16(4),
	/* SMB2_READ */ cpu_to_le16(17),
	/* SMB2_WRITE */ cpu_to_le16(17),
	/* SMB2_LOCK */ cpu_to_le16(4),
	/* SMB2_IOCTL */ cpu_to_le16(49),
	/* BB CHECK this ... not listed in documentation */
	/* SMB2_CANCEL */ cpu_to_le16(0),
	/* SMB2_ECHO */ cpu_to_le16(4),
	/* SMB2_QUERY_DIRECTORY */ cpu_to_le16(9),
	/* SMB2_CHANGE_NOTIFY */ cpu_to_le16(9),
	/* SMB2_QUERY_INFO */ cpu_to_le16(9),
	/* SMB2_SET_INFO */ cpu_to_le16(2),
	/* BB FIXME can also be 44 for lease break */
	/* SMB2_OPLOCK_BREAK */ cpu_to_le16(24)
};

#define SMB311_NEGPROT_BASE_SIZE (sizeof(struct smb2_hdr) + sizeof(struct smb2_negotiate_rsp))

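/*
 * For an SMB3.1.1 NEGOTIATE response, work out how many bytes of negotiate
 * contexts (including any padding between the security blob and the first
 * context) follow the fixed-size part of the response, so that the length
 * check in smb2_check_message() can account for them. Returns 0 if there
 * are no contexts or the offsets look bogus.
 */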
static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len,
			      __u32 non_ctxlen)
{
	__u16 neg_count;
	__u32 nc_offset, size_of_pad_before_neg_ctxts;
	struct smb2_negotiate_rsp *pneg_rsp = (struct smb2_negotiate_rsp *)hdr;

	/* Negotiate contexts are only valid for latest dialect SMB3.11 */
	neg_count = le16_to_cpu(pneg_rsp->NegotiateContextCount);
	if ((neg_count == 0) ||
	    (pneg_rsp->DialectRevision != cpu_to_le16(SMB311_PROT_ID)))
		return 0;

	/*
	 * if SPNEGO blob present (ie the RFC2478 GSS info which indicates
	 * which security mechanisms the server supports) make sure that
	 * the negotiate contexts start after it
	 */
	nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset);
	/*
	 * non_ctxlen is at least shdr->StructureSize + pdu->StructureSize2
	 * and the latter is 1 byte bigger than the fixed-size area of the
	 * NEGOTIATE response
	 */
	if (nc_offset + 1 < non_ctxlen) {
		pr_warn_once("Invalid negotiate context offset %d\n", nc_offset);
		return 0;
	} else if (nc_offset + 1 == non_ctxlen) {
		cifs_dbg(FYI, "no SPNEGO security blob in negprot rsp\n");
		size_of_pad_before_neg_ctxts = 0;
	} else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE)
		/* has padding, but no SPNEGO blob */
		size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen + 1;
	else
		size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen;

	/* Verify that at least minimal negotiate contexts fit within frame */
	if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) {
		pr_warn_once("negotiate context goes beyond end\n");
		return 0;
	}

	cifs_dbg(FYI, "length of negcontexts %d pad %d\n",
		 len - nc_offset, size_of_pad_before_neg_ctxts);

	/* length of negcontexts including pad from end of sec blob to them */
	return (len - nc_offset) + size_of_pad_before_neg_ctxts;
}

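/*
 * Validate a received SMB2/SMB3 frame: check minimum and maximum lengths,
 * the header, the per-command StructureSize2, and that the length on the
 * wire matches the length calculated from the fixed and variable parts
 * (allowing for the padding quirks of various servers). Returns 0 if the
 * frame looks valid, 1 if it is malformed.
 */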
int
smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
	struct smb2_pdu *pdu = (struct smb2_pdu *)shdr;
	__u64 mid;
	__u32 clc_len; /* calculated length */
	int command;
	int pdu_size = sizeof(struct smb2_pdu);
	int hdr_size = sizeof(struct smb2_hdr);

	/*
	 * Add function to do table lookup of StructureSize by command
	 * ie Validate the wct via smb2_struct_sizes table above
	 */
	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		struct smb2_transform_hdr *thdr =
			(struct smb2_transform_hdr *)buf;
		struct cifs_ses *ses = NULL;

		/* decrypt frame now that it is completely read in */
		spin_lock(&cifs_tcp_ses_lock);
		list_for_each_entry(ses, &srvr->smb_ses_list, smb_ses_list) {
			if (ses->Suid == le64_to_cpu(thdr->SessionId))
				break;
		}
		spin_unlock(&cifs_tcp_ses_lock);
		if (list_entry_is_head(ses, &srvr->smb_ses_list,
				       smb_ses_list)) {
			cifs_dbg(VFS, "no decryption - session id not found\n");
			return 1;
		}
	}

	mid = le64_to_cpu(shdr->MessageId);
	if (len < pdu_size) {
		if ((len >= hdr_size)
		    && (shdr->Status != 0)) {
			pdu->StructureSize2 = 0;
			/*
			 * As with SMB/CIFS, on some error cases servers may
			 * not return wct properly
			 */
			return 0;
		} else {
			cifs_dbg(VFS, "Length less than SMB header size\n");
		}
		return 1;
	}
	if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE) {
		cifs_dbg(VFS, "SMB length greater than maximum, mid=%llu\n",
			 mid);
		return 1;
	}

	if (check_smb2_hdr(shdr, mid))
		return 1;

	if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
		cifs_dbg(VFS, "Invalid structure size %u\n",
			 le16_to_cpu(shdr->StructureSize));
		return 1;
	}

	command = le16_to_cpu(shdr->Command);
	if (command >= NUMBER_OF_SMB2_COMMANDS) {
		cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
		return 1;
	}

	if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
		if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
		    pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) {
			/* error packets have 9 byte structure size */
			cifs_dbg(VFS, "Invalid response size %u for command %d\n",
				 le16_to_cpu(pdu->StructureSize2), command);
			return 1;
		} else if (command == SMB2_OPLOCK_BREAK_HE
			   && (shdr->Status == 0)
			   && (le16_to_cpu(pdu->StructureSize2) != 44)
			   && (le16_to_cpu(pdu->StructureSize2) != 36)) {
			/* special case for SMB2.1 lease break message */
			cifs_dbg(VFS, "Invalid response size %d for oplock break\n",
				 le16_to_cpu(pdu->StructureSize2));
			return 1;
		}
	}

	clc_len = smb2_calc_size(buf, srvr);

	if (shdr->Command == SMB2_NEGOTIATE)
		clc_len += get_neg_ctxt_len(shdr, len, clc_len);

	if (len != clc_len) {
		cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n",
			 clc_len, len, mid);
		/* create failed on symlink */
		if (command == SMB2_CREATE_HE &&
		    shdr->Status == STATUS_STOPPED_ON_SYMLINK)
			return 0;
		/* Windows 7 server returns 24 bytes more */
		if (clc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
			return 0;
		/* server can return one byte more due to implied bcc[0] */
		if (clc_len == len + 1)
			return 0;

		/*
		 * Some Windows servers (win2016) will also pad the final
		 * PDU in a compound to 8 bytes.
		 */
		if (((clc_len + 7) & ~7) == len)
			return 0;

		/*
		 * MacOS server pads after SMB2.1 write response with 3 bytes
		 * of junk. Other servers match RFC1001 len to actual
		 * SMB2/SMB3 frame length (header + smb2 response specific data).
		 * Some Windows servers also pad up to 8 bytes when compounding.
		 */
		if (clc_len < len)
			return 0;

		pr_warn_once(
			"srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
			len, clc_len, command, mid);

		return 1;
	}
	return 0;
}

/*
 * The size of the variable area depends on the offset and length fields
 * located in different fields for various SMB2 responses. SMB2 responses
 * with no variable length info show an offset of zero for the offset field.
 */
static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ true,
	/* SMB2_SESSION_SETUP */ true,
	/* SMB2_LOGOFF */ false,
	/* SMB2_TREE_CONNECT */ false,
	/* SMB2_TREE_DISCONNECT */ false,
	/* SMB2_CREATE */ true,
	/* SMB2_CLOSE */ false,
	/* SMB2_FLUSH */ false,
	/* SMB2_READ */ true,
	/* SMB2_WRITE */ false,
	/* SMB2_LOCK */ false,
	/* SMB2_IOCTL */ true,
	/* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
	/* SMB2_ECHO */ false,
	/* SMB2_QUERY_DIRECTORY */ true,
	/* SMB2_CHANGE_NOTIFY */ true,
	/* SMB2_QUERY_INFO */ true,
	/* SMB2_SET_INFO */ false,
	/* SMB2_OPLOCK_BREAK */ false
};

/*
 * Returns the pointer to the beginning of the data area. The length of the
 * data area and the offset to it (from the beginning of the SMB) are also
 * returned.
 */
char *
smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
{
	*off = 0;
	*len = 0;

	/* error responses do not have data area */
	if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
	    (((struct smb2_err_rsp *)shdr)->StructureSize) ==
						SMB2_ERROR_STRUCTURE_SIZE2)
		return NULL;

	/*
	 * Following commands have data areas so we have to get the location
	 * of the data buffer offset and data buffer length for the particular
	 * command.
	 */
	switch (shdr->Command) {
	case SMB2_NEGOTIATE:
		*off = le16_to_cpu(
		    ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferOffset);
		*len = le16_to_cpu(
		    ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferLength);
		break;
	case SMB2_SESSION_SETUP:
		*off = le16_to_cpu(
		    ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferOffset);
		*len = le16_to_cpu(
		    ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferLength);
		break;
	case SMB2_CREATE:
		*off = le32_to_cpu(
		    ((struct smb2_create_rsp *)shdr)->CreateContextsOffset);
		*len = le32_to_cpu(
		    ((struct smb2_create_rsp *)shdr)->CreateContextsLength);
		break;
	case SMB2_QUERY_INFO:
		*off = le16_to_cpu(
		    ((struct smb2_query_info_rsp *)shdr)->OutputBufferOffset);
		*len = le32_to_cpu(
		    ((struct smb2_query_info_rsp *)shdr)->OutputBufferLength);
		break;
	case SMB2_READ:
		/* TODO: is this a bug ? */
		*off = ((struct smb2_read_rsp *)shdr)->DataOffset;
		*len = le32_to_cpu(((struct smb2_read_rsp *)shdr)->DataLength);
		break;
	case SMB2_QUERY_DIRECTORY:
		*off = le16_to_cpu(
		  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferOffset);
		*len = le32_to_cpu(
		  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferLength);
		break;
	case SMB2_IOCTL:
		*off = le32_to_cpu(
		  ((struct smb2_ioctl_rsp *)shdr)->OutputOffset);
		*len = le32_to_cpu(
		  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
		break;
	case SMB2_CHANGE_NOTIFY:
		*off = le16_to_cpu(
		  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
		*len = le32_to_cpu(
		  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
		break;
	default:
		cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
		break;
	}

	/*
	 * Invalid length or offset probably means data area is invalid, but
	 * we have little choice but to ignore the data area in this case.
	 */
	if (*off > 4096) {
		cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
		*len = 0;
		*off = 0;
	} else if (*off < 0) {
		cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
			 *off);
		*off = 0;
		*len = 0;
	} else if (*len < 0) {
		cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
			 *len);
		*len = 0;
	} else if (*len > 128 * 1024) {
		cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
		*len = 0;
	}

	/* return pointer to beginning of data area, ie offset from SMB start */
	if ((*off != 0) && (*len != 0))
		return (char *)shdr + *off;
	else
		return NULL;
}

/*
 * Calculate the size of the SMB message based on the fixed header
 * portion, the number of word parameters and the data portion of the message.
 */
unsigned int
smb2_calc_size(void *buf, struct TCP_Server_Info *srvr)
{
	struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
	struct smb2_hdr *shdr = &pdu->hdr;
	int offset; /* the offset from the beginning of SMB to data area */
	int data_length; /* the length of the variable length data area */
	/* Structure Size has already been checked to make sure it is 64 */
	int len = le16_to_cpu(shdr->StructureSize);

	/*
	 * StructureSize2, ie length of fixed parameter area has already
	 * been checked to make sure it is the correct length.
	 */
	len += le16_to_cpu(pdu->StructureSize2);

	if (has_smb2_data_area[le16_to_cpu(shdr->Command)] == false)
		goto calc_size_exit;

	smb2_get_data_area_len(&offset, &data_length, shdr);
	cifs_dbg(FYI, "SMB2 data length %d offset %d\n", data_length, offset);

	if (data_length > 0) {
		/*
		 * Check to make sure that data area begins after fixed area,
		 * Note that last byte of the fixed area is part of data area
		 * for some commands, typically those with odd StructureSize,
		 * so we must add one to the calculation.
		 */
		if (offset + 1 < len) {
			cifs_dbg(VFS, "data area offset %d overlaps SMB2 header %d\n",
				 offset + 1, len);
			data_length = 0;
		} else {
			len = offset + data_length;
		}
	}
calc_size_exit:
	cifs_dbg(FYI, "SMB2 len %d\n", len);
	return len;
}

/* Note: caller must free return buffer */
__le16 *
cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
{
	int len;
	const char *start_of_path;
	__le16 *to;
	int map_type;

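	/*
	 * Pick how reserved characters are remapped when converting the path:
	 * SFM-style mapping, the older SFU-style mapping, or no remapping,
	 * depending on the mount options recorded in cifs_sb.
	 */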
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		map_type = SFM_MAP_UNI_RSVD;
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		map_type = SFU_MAP_UNI_RSVD;
	else
		map_type = NO_MAP_UNI_RSVD;

	/* Windows doesn't allow paths beginning with \ */
	if (from[0] == '\\')
		start_of_path = from + 1;

	/* SMB311 POSIX extensions paths do not include leading slash */
	else if (cifs_sb_master_tlink(cifs_sb) &&
		 cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
		 (from[0] == '/')) {
		start_of_path = from + 1;
	} else
		start_of_path = from;

	to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
				   cifs_sb->local_nls, map_type);
	return to;
}

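/* Translate the caching state kept in cifsInodeInfo into SMB2 lease flags. */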
__le32
smb2_get_lease_state(struct cifsInodeInfo *cinode)
{
	__le32 lease = 0;

	if (CIFS_CACHE_WRITE(cinode))
		lease |= SMB2_LEASE_WRITE_CACHING;
	if (CIFS_CACHE_HANDLE(cinode))
		lease |= SMB2_LEASE_HANDLE_CACHING;
	if (CIFS_CACHE_READ(cinode))
		lease |= SMB2_LEASE_READ_CACHING;
	return lease;
}

struct smb2_lease_break_work {
	struct work_struct lease_break;
	struct tcon_link *tlink;
	__u8 lease_key[16];
	__le32 lease_state;
};

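/*
 * Deferred work that sends the lease break acknowledgement for a pending
 * open, then drops the tlink reference taken when the work was queued.
 */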
static void
cifs_ses_oplock_break(struct work_struct *work)
{
	struct smb2_lease_break_work *lw = container_of(work,
			struct smb2_lease_break_work, lease_break);
	int rc = 0;

	rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
			      lw->lease_state);

	cifs_dbg(FYI, "Lease release rc %d\n", rc);
	cifs_put_tlink(lw->tlink);
	kfree(lw);
}

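/*
 * Queue a cifs_ses_oplock_break work item to acknowledge a lease break for
 * an open that is still in flight. Takes over the caller's tlink reference;
 * it is released by the work handler (or here if the allocation fails).
 */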
static void
smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
			      __le32 new_lease_state)
{
	struct smb2_lease_break_work *lw;

	lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
	if (!lw) {
		cifs_put_tlink(tlink);
		return;
	}

	INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
	lw->tlink = tlink;
	lw->lease_state = new_lease_state;
	memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
	queue_work(cifsiod_wq, &lw->lease_break);
}

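/*
 * Walk the tcon's open file list looking for a file whose lease key matches
 * the lease break. If found, record the new lease state and queue an oplock
 * break for that file. Returns true if a matching open file was found.
 * Called with tcon->open_file_lock held.
 */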
static bool
smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
{
	__u8 lease_state;
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode;
	int ack_req = le32_to_cpu(rsp->Flags &
				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);

	lease_state = le32_to_cpu(rsp->NewLeaseState);

	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		cinode = CIFS_I(d_inode(cfile->dentry));

		if (memcmp(cinode->lease_key, rsp->LeaseKey,
			   SMB2_LEASE_KEY_SIZE))
			continue;

		cifs_dbg(FYI, "found in the open list\n");
		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
			 lease_state);

		if (ack_req)
			cfile->oplock_break_cancelled = false;
		else
			cfile->oplock_break_cancelled = true;

		set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);

		cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
		cfile->oplock_level = lease_state;

		cifs_queue_oplock_break(cfile);
		return true;
	}

	return false;
}

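/*
 * Walk the tcon's pending opens looking for entries whose lease key matches
 * the lease break, updating their oplock state. Returns the first matching
 * pending open if the break requires an acknowledgement, otherwise NULL.
 * Called with tcon->open_file_lock held.
 */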
static struct cifs_pending_open *
smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
				  struct smb2_lease_break *rsp)
{
	__u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
	int ack_req = le32_to_cpu(rsp->Flags &
				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
	struct cifs_pending_open *open;
	struct cifs_pending_open *found = NULL;

	list_for_each_entry(open, &tcon->pending_opens, olist) {
		if (memcmp(open->lease_key, rsp->LeaseKey,
			   SMB2_LEASE_KEY_SIZE))
			continue;

		if (!found && ack_req) {
			found = open;
		}

		cifs_dbg(FYI, "found in the pending open list\n");
		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
			 lease_state);

		open->oplock = lease_state;
	}

	return found;
}

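/*
 * Handle a lease break notification: search every server, session and tcon
 * for an open file, pending open or cached root handle matching the lease
 * key, and kick off the appropriate break handling. Returns true if a match
 * was found.
 */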
static bool
smb2_is_valid_lease_break(char *buffer)
{
	struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifs_pending_open *open;

	cifs_dbg(FYI, "Checking for lease break\n");

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
				spin_lock(&tcon->open_file_lock);
				cifs_stats_inc(
				    &tcon->stats.cifs_stats.num_oplock_brks);
				if (smb2_tcon_has_lease(tcon, rsp)) {
					spin_unlock(&tcon->open_file_lock);
					spin_unlock(&cifs_tcp_ses_lock);
					return true;
				}
				open = smb2_tcon_find_pending_open_lease(tcon,
									 rsp);
				if (open) {
					__u8 lease_key[SMB2_LEASE_KEY_SIZE];
					struct tcon_link *tlink;

					tlink = cifs_get_tlink(open->tlink);
					memcpy(lease_key, open->lease_key,
					       SMB2_LEASE_KEY_SIZE);
					spin_unlock(&tcon->open_file_lock);
					spin_unlock(&cifs_tcp_ses_lock);
					smb2_queue_pending_open_break(tlink,
								      lease_key,
								      rsp->NewLeaseState);
					return true;
				}
				spin_unlock(&tcon->open_file_lock);

				if (tcon->crfid.is_valid &&
				    !memcmp(rsp->LeaseKey,
					    tcon->crfid.fid->lease_key,
					    SMB2_LEASE_KEY_SIZE)) {
					tcon->crfid.time = 0;
					INIT_WORK(&tcon->crfid.lease_break,
						  smb2_cached_lease_break);
					queue_work(cifsiod_wq,
						   &tcon->crfid.lease_break);
					spin_unlock(&cifs_tcp_ses_lock);
					return true;
				}
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
	return false;
}

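/*
 * Handle an incoming oplock (or lease) break frame: lease breaks are passed
 * on to smb2_is_valid_lease_break(), oplock breaks are matched against open
 * files by their persistent/volatile file ids and queued for processing.
 */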
bool
smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
{
	struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;

	cifs_dbg(FYI, "Checking for oplock break\n");

	if (rsp->hdr.Command != SMB2_OPLOCK_BREAK)
		return false;

	if (rsp->StructureSize !=
				smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
		if (le16_to_cpu(rsp->StructureSize) == 44)
			return smb2_is_valid_lease_break(buffer);
		else
			return false;
	}

	cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel);

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {

			spin_lock(&tcon->open_file_lock);
			list_for_each_entry(cfile, &tcon->openFileList, tlist) {
				if (rsp->PersistentFid !=
				    cfile->fid.persistent_fid ||
				    rsp->VolatileFid !=
				    cfile->fid.volatile_fid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				cifs_stats_inc(
				    &tcon->stats.cifs_stats.num_oplock_brks);
				cinode = CIFS_I(d_inode(cfile->dentry));
				spin_lock(&cfile->file_info_lock);
				if (!CIFS_CACHE_WRITE(cinode) &&
				    rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
					cfile->oplock_break_cancelled = true;
				else
					cfile->oplock_break_cancelled = false;

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&cinode->flags);

				cfile->oplock_epoch = 0;
				cfile->oplock_level = rsp->OplockLevel;

				spin_unlock(&cfile->file_info_lock);

				cifs_queue_oplock_break(cfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
	return true;
}

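/*
 * Work handler that closes a handle left open on the server because the
 * original open or close was interrupted; drops the tcon reference taken
 * when the work was queued.
 */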
void
smb2_cancelled_close_fid(struct work_struct *work)
{
	struct close_cancelled_open *cancelled = container_of(work,
					struct close_cancelled_open, work);
	struct cifs_tcon *tcon = cancelled->tcon;
	int rc;

	if (cancelled->mid)
		cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llu\n",
			      cancelled->mid);
	else
		cifs_tcon_dbg(VFS, "Close interrupted close\n");

	rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid,
			cancelled->fid.volatile_fid);
	if (rc)
		cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);

	cifs_put_tcon(tcon);
	kfree(cancelled);
}

/*
 * Caller should already hold an extra reference to @tcon.
 * This function is used to queue work to close a handle to prevent leaks
 * on the server.
 * We handle two cases: if an open was interrupted after we sent the
 * SMB2_CREATE to the server but before we processed the reply, and second
 * if a close was interrupted before we sent the SMB2_CLOSE to the server.
 */
static int
__smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
			    __u64 persistent_fid, __u64 volatile_fid)
{
	struct close_cancelled_open *cancelled;

	cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
	if (!cancelled)
		return -ENOMEM;

	cancelled->fid.persistent_fid = persistent_fid;
	cancelled->fid.volatile_fid = volatile_fid;
	cancelled->tcon = tcon;
	cancelled->cmd = cmd;
	cancelled->mid = mid;
	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
	WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);

	return 0;
}

int
smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
			    __u64 volatile_fid)
{
	int rc;

	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
	spin_lock(&cifs_tcp_ses_lock);
	if (tcon->tc_count <= 0) {
		struct TCP_Server_Info *server = NULL;

		WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
		spin_unlock(&cifs_tcp_ses_lock);

		if (tcon->ses)
			server = tcon->ses->server;

		cifs_server_dbg(FYI, "tid=%u: tcon is closing, skipping async close retry of fid %llu %llu\n",
				tcon->tid, persistent_fid, volatile_fid);

		return 0;
	}
	tcon->tc_count++;
	spin_unlock(&cifs_tcp_ses_lock);

	rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
					 persistent_fid, volatile_fid);
	if (rc)
		cifs_put_tcon(tcon);

	return rc;
}

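/*
 * Called for a cancelled mid: if the interrupted request turned out to be a
 * successful SMB2_CREATE, queue work to close the file handle the server
 * created for us so that it does not leak.
 */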
int
smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	struct smb2_hdr *hdr = mid->resp_buf;
	struct smb2_create_rsp *rsp = mid->resp_buf;
	struct cifs_tcon *tcon;
	int rc;

	if ((mid->optype & CIFS_CP_CREATE_CLOSE_OP) || hdr->Command != SMB2_CREATE ||
	    hdr->Status != STATUS_SUCCESS)
		return 0;

	tcon = smb2_find_smb_tcon(server, le64_to_cpu(hdr->SessionId),
				  le32_to_cpu(hdr->Id.SyncId.TreeId));
	if (!tcon)
		return -ENOENT;

	rc = __smb2_handle_cancelled_cmd(tcon,
					 le16_to_cpu(hdr->Command),
					 le64_to_cpu(hdr->MessageId),
					 le64_to_cpu(rsp->PersistentFileId),
					 le64_to_cpu(rsp->VolatileFileId));
	if (rc)
		cifs_put_tcon(tcon);

	return rc;
}

/**
 * smb311_update_preauth_hash - update @ses hash with the packet data in @iov
 *
 * Assumes @iov does not contain the rfc1002 length and iov[0] has the
 * SMB2 header.
 *
 * @ses: server session structure
 * @server: pointer to server info
 * @iov: array containing the SMB request we will send to the server
 * @nvec: number of array entries for the iov
 */
int
smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
			   struct kvec *iov, int nvec)
{
	int i, rc;
	struct sdesc *d;
	struct smb2_hdr *hdr;

	hdr = (struct smb2_hdr *)iov[0].iov_base;
	/* neg prot are always taken */
	if (hdr->Command == SMB2_NEGOTIATE)
		goto ok;

	/*
	 * If we process a command which wasn't a negprot it means the
	 * neg prot was already done, so the server dialect was set
	 * and we can test it. Preauth requires 3.1.1 for now.
	 */
	if (server->dialect != SMB311_PROT_ID)
		return 0;

	if (hdr->Command != SMB2_SESSION_SETUP)
		return 0;

	/* skip last sess setup response */
	if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
	    && (hdr->Status == NT_STATUS_OK
		|| (hdr->Status !=
		    cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
		return 0;

ok:
	rc = smb311_crypto_shash_allocate(server);
	if (rc)
		return rc;

	d = server->secmech.sdescsha512;
	rc = crypto_shash_init(&d->shash);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
		return rc;
	}

	rc = crypto_shash_update(&d->shash, ses->preauth_sha_hash,
				 SMB2_PREAUTH_HASH_SIZE);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
		return rc;
	}

	for (i = 0; i < nvec; i++) {
		rc = crypto_shash_update(&d->shash,
					 iov[i].iov_base, iov[i].iov_len);
		if (rc) {
			cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
				 __func__);
			return rc;
		}
	}

	rc = crypto_shash_final(&d->shash, ses->preauth_sha_hash);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
			 __func__);
		return rc;
	}

	return 0;
}