blob: 48657ddbd75e689c20fc05034f2c13ea0870e11e [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Aurelien Aptel35adffe2019-09-20 06:29:39 +020013#include <linux/sort.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070014#include <crypto/aead.h>
Christoph Hellwig10c5db22020-05-23 09:30:11 +020015#include <linux/fiemap.h>
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +100016#include "cifsfs.h"
Steve French1080ef72011-02-24 18:07:19 +000017#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040018#include "smb2pdu.h"
19#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040020#include "cifsproto.h"
21#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040022#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070023#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070024#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050025#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070026#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040027
Pavel Shilovskyef68e832019-01-18 17:25:36 -080028/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040029static int
30change_conf(struct TCP_Server_Info *server)
31{
32 server->credits += server->echo_credits + server->oplock_credits;
33 server->oplock_credits = server->echo_credits = 0;
34 switch (server->credits) {
35 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080036 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040037 case 1:
38 server->echoes = false;
39 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040040 break;
41 case 2:
42 server->echoes = true;
43 server->oplocks = false;
44 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040045 break;
46 default:
47 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050048 if (enable_oplocks) {
49 server->oplocks = true;
50 server->oplock_credits = 1;
51 } else
52 server->oplocks = false;
53
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040054 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040055 }
56 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080057 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040058}
59
/*
 * Return credits granted by a response to the server's pool and, when the
 * connection goes idle, rebalance the echo/oplock reserves.  Credits
 * tagged with a stale reconnect instance are dropped instead of added.
 */
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;	/* rc == -1 means change_conf() was not run */
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	/* only put credits back if they belong to this server instance */
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		pr_warn_once("server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	/* log outside the lock */
	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			add, instance);

	/* nothing more worth reporting if the connection is going away */
	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}
127
128static void
129smb2_set_credits(struct TCP_Server_Info *server, const int val)
130{
131 spin_lock(&server->req_lock);
132 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500133 if (val == 1)
134 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400135 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500136 /* don't log while holding the lock */
137 if (val == 1)
138 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400139}
140
141static int *
142smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
143{
144 switch (optype) {
145 case CIFS_ECHO_OP:
146 return &server->echo_credits;
147 case CIFS_OBREAK_OP:
148 return &server->oplock_credits;
149 default:
150 return &server->credits;
151 }
152}
153
/* Number of credits the server granted in the response for this mid. */
static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	return mid->credits_received;
}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400159
/*
 * Reserve credits for a large (multi-credit) read or write of up to
 * @size bytes.  Sleeps (killably) until at least one credit exists.  On
 * success *num holds the I/O size to use and @credits records the
 * reservation (value plus the server instance it was taken from).
 */
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			/* drop the lock while waiting for credits to arrive */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				/*
				 * Pool nearly empty: fall back to a
				 * single-credit sized request and reserve
				 * nothing (value 0 means "no reservation").
				 */
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			if (server->in_flight > server->max_in_flight)
				server->max_in_flight = server->in_flight;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}
211
/*
 * Shrink a previously reserved MTU credit allocation down to what
 * @payload_size actually needs and return the excess to the pool.
 * Fails with -EAGAIN if the connection was reset (reconnect instance
 * changed) since the reservation was taken.
 */
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

	if (!credits->value || credits->value == new_val)
		return 0;

	/* a reservation smaller than the payload indicates a caller bug */
	if (credits->value < new_val) {
		WARN_ONCE(1, "request has less credits (%d) than required (%d)",
			  credits->value, new_val);
		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		spin_unlock(&server->req_lock);
		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
			 credits->value - new_val);
		return -EAGAIN;
	}

	server->credits += credits->value - new_val;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	credits->value = new_val;
	return 0;
}
243
/* Allocate the next message id; SMB2 mids are a monotonic 64-bit counter. */
static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
	__u64 mid;
	/* for SMB2 we need the current value */
	spin_lock(&GlobalMid_Lock);
	mid = server->CurrentMid++;
	spin_unlock(&GlobalMid_Lock);
	return mid;
}
Steve French1080ef72011-02-24 18:07:19 +0000254
/*
 * Give back @val message ids after a request that consumed them was not
 * actually sent.  Guarded against underflowing the counter.
 */
static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
	spin_lock(&GlobalMid_Lock);
	if (server->CurrentMid >= val)
		server->CurrentMid -= val;
	spin_unlock(&GlobalMid_Lock);
}
263
/*
 * Find the pending mid entry matching the MessageId of a received
 * buffer.  Returns the entry with an extra reference held (caller must
 * put it), or NULL if nothing matches or the frame is an encrypted
 * transform header this path cannot parse.
 */
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			/* reference for the caller, dropped when mid is freed */
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}
289
/* Dump header fields of an SMB2 frame for debugging (DEBUG2 builds only). */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}
303
/* Negotiate is needed until we have learned the server's max_read. */
static bool
smb2_need_neg(struct TCP_Server_Info *server)
{
	return server->max_read == 0;
}
309
/*
 * Send the SMB2/3 negotiate request.  The mid counter is reset first
 * since negotiate (re)starts the conversation with the server.
 */
static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;

	cifs_ses_server(ses)->CurrentMid = 0;
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}
322
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700323static unsigned int
324smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
325{
326 struct TCP_Server_Info *server = tcon->ses->server;
327 unsigned int wsize;
328
329 /* start with specified wsize, or default */
330 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
331 wsize = min_t(unsigned int, wsize, server->max_write);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400332 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
333 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700334
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700335 return wsize;
336}
337
/*
 * Compute the write size for an SMB3 mount: the user-requested (or
 * default) wsize clamped to the server limit, the RDMA transport limits
 * when SMB Direct is in use, and 64K when large MTU is unsupported.
 */
static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			wsize = min_t(unsigned int,
				      wsize,
				      server->smbd_conn->max_fragmented_send_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
369
370static unsigned int
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700371smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
372{
373 struct TCP_Server_Info *server = tcon->ses->server;
374 unsigned int rsize;
375
376 /* start with specified rsize, or default */
377 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
378 rsize = min_t(unsigned int, rsize, server->max_read);
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400379
380 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
381 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700382
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700383 return rsize;
384}
385
/*
 * Compute the read size for an SMB3 mount: the user-requested (or
 * default) rsize clamped to the server limit, the RDMA transport limits
 * when SMB Direct is in use, and 64K when large MTU is unsupported.
 */
static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			rsize = min_t(unsigned int,
				      rsize,
				      server->smbd_conn->max_fragmented_recv_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200418
419static int
420parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
421 size_t buf_len,
422 struct cifs_server_iface **iface_list,
423 size_t *iface_count)
424{
425 struct network_interface_info_ioctl_rsp *p;
426 struct sockaddr_in *addr4;
427 struct sockaddr_in6 *addr6;
428 struct iface_info_ipv4 *p4;
429 struct iface_info_ipv6 *p6;
430 struct cifs_server_iface *info;
431 ssize_t bytes_left;
432 size_t next = 0;
433 int nb_iface = 0;
434 int rc = 0;
435
436 *iface_list = NULL;
437 *iface_count = 0;
438
439 /*
440 * Fist pass: count and sanity check
441 */
442
443 bytes_left = buf_len;
444 p = buf;
445 while (bytes_left >= sizeof(*p)) {
446 nb_iface++;
447 next = le32_to_cpu(p->Next);
448 if (!next) {
449 bytes_left -= sizeof(*p);
450 break;
451 }
452 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
453 bytes_left -= next;
454 }
455
456 if (!nb_iface) {
457 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
458 rc = -EINVAL;
459 goto out;
460 }
461
462 if (bytes_left || p->Next)
463 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
464
465
466 /*
467 * Second pass: extract info to internal structure
468 */
469
470 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
471 if (!*iface_list) {
472 rc = -ENOMEM;
473 goto out;
474 }
475
476 info = *iface_list;
477 bytes_left = buf_len;
478 p = buf;
479 while (bytes_left >= sizeof(*p)) {
480 info->speed = le64_to_cpu(p->LinkSpeed);
481 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
482 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
483
484 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
485 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
486 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
487 le32_to_cpu(p->Capability));
488
489 switch (p->Family) {
490 /*
491 * The kernel and wire socket structures have the same
492 * layout and use network byte order but make the
493 * conversion explicit in case either one changes.
494 */
495 case INTERNETWORK:
496 addr4 = (struct sockaddr_in *)&info->sockaddr;
497 p4 = (struct iface_info_ipv4 *)p->Buffer;
498 addr4->sin_family = AF_INET;
499 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
500
501 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
502 addr4->sin_port = cpu_to_be16(CIFS_PORT);
503
504 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
505 &addr4->sin_addr);
506 break;
507 case INTERNETWORKV6:
508 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
509 p6 = (struct iface_info_ipv6 *)p->Buffer;
510 addr6->sin6_family = AF_INET6;
511 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
512
513 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
514 addr6->sin6_flowinfo = 0;
515 addr6->sin6_scope_id = 0;
516 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
517
518 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
519 &addr6->sin6_addr);
520 break;
521 default:
522 cifs_dbg(VFS,
523 "%s: skipping unsupported socket family\n",
524 __func__);
525 goto next_iface;
526 }
527
528 (*iface_count)++;
529 info++;
530next_iface:
531 next = le32_to_cpu(p->Next);
532 if (!next)
533 break;
534 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
535 bytes_left -= next;
536 }
537
538 if (!*iface_count) {
539 rc = -EINVAL;
540 goto out;
541 }
542
543out:
544 if (rc) {
545 kfree(*iface_list);
546 *iface_count = 0;
547 *iface_list = NULL;
548 }
549 return rc;
550}
551
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200552static int compare_iface(const void *ia, const void *ib)
553{
554 const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
555 const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
556
557 return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
558}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200559
/*
 * Query the server's network interfaces (used for multichannel) and
 * replace the session's cached interface list with the result, sorted
 * fastest first.  A server that lacks the ioctl is not treated as an
 * error worth shouting about.
 */
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	/* sort interfaces from fastest to slowest */
	sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);

	/* publish the new list under the iface lock, freeing the old one */
	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500602
/*
 * kref release callback for the cached root fid: closes the SMB2 handle
 * on the wire and invalidates all cached state for it.
 */
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
		cfid->has_lease = false;
	}
}
618
619void close_shroot(struct cached_fid *cfid)
620{
621 mutex_lock(&cfid->fid_mutex);
622 kref_put(&cfid->refcount, smb2_close_cached_fid);
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000623 mutex_unlock(&cfid->fid_mutex);
624}
625
/* Drop the lease's reference on the cached root; caller holds fid_mutex. */
void close_shroot_lease_locked(struct cached_fid *cfid)
{
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}
633
634void close_shroot_lease(struct cached_fid *cfid)
635{
636 mutex_lock(&cfid->fid_mutex);
637 close_shroot_lease_locked(cfid);
638 mutex_unlock(&cfid->fid_mutex);
639}
640
/* Deferred work: the server broke the lease on the cached root handle. */
void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, lease_break);

	close_shroot_lease(cfid);
}
649
/*
 * Open the directory at the root of a share
 *
 * Returns (in *cfid) a referenced cached root handle, opening it with a
 * compound create+query_info if no valid one exists yet.  The fid mutex
 * is deliberately dropped around the network round trip (see comment
 * below), so a concurrent opener may win the race; that case is detected
 * and the duplicate handle is closed.
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_sb_info *cifs_sb,
		struct cached_fid **cfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;

	/* fast path: hand out the existing cached handle */
	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		*cfid = &tcon->crfid;
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (!server->ops->new_lease_key)
		return -EIO;

	pfid = tcon->crfid.fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* second op in the compound: query all file info on the new handle */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		rc = 0;
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normaly */

	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	/* publish the new handle as the cached root */
	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/* extra reference held by the lease itself */
		kref_get(&tcon->crfid.refcount);
		tcon->crfid.has_lease = true;
		smb2_parse_contexts(server, o_rsp,
				&oparms.fid->epoch,
				    oparms.fid->lease_key, &oplock,
				    NULL, NULL);
	} else
		goto oshr_exit;

	/* cache the FILE_ALL_INFORMATION data if the server returned it */
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = true;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	if (rc == 0)
		*cfid = &tcon->crfid;
	return rc;
}
830
Steve French34f62642013-10-09 02:07:00 -0500831static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200832smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
833 struct cifs_sb_info *cifs_sb)
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500834{
835 int rc;
836 __le16 srch_path = 0; /* Null - open root of share */
837 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
838 struct cifs_open_parms oparms;
839 struct cifs_fid fid;
Steve French3d4ef9a2018-04-25 22:19:09 -0500840 bool no_cached_open = tcon->nohandlecache;
Ronnie Sahlberg9e81e8f2020-10-05 12:37:52 +1000841 struct cached_fid *cfid = NULL;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500842
843 oparms.tcon = tcon;
844 oparms.desired_access = FILE_READ_ATTRIBUTES;
845 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200846 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500847 oparms.fid = &fid;
848 oparms.reconnect = false;
849
Ronnie Sahlberg9e81e8f2020-10-05 12:37:52 +1000850 if (no_cached_open) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000851 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
Aurelien Aptel69dda302020-03-02 17:53:22 +0100852 NULL, NULL);
Ronnie Sahlberg9e81e8f2020-10-05 12:37:52 +1000853 } else {
854 rc = open_shroot(xid, tcon, cifs_sb, &cfid);
855 if (rc == 0)
856 memcpy(&fid, cfid->fid, sizeof(struct cifs_fid));
857 }
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500858 if (rc)
859 return;
860
Steve Frenchc481e9f2013-10-14 01:21:53 -0500861 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500862
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500863 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
864 FS_ATTRIBUTE_INFORMATION);
865 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
866 FS_DEVICE_INFORMATION);
867 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500868 FS_VOLUME_INFORMATION);
869 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500870 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Steve French3d4ef9a2018-04-25 22:19:09 -0500871 if (no_cached_open)
872 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000873 else
Ronnie Sahlberg9e81e8f2020-10-05 12:37:52 +1000874 close_shroot(cfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500875}
876
877static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200878smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
879 struct cifs_sb_info *cifs_sb)
Steve French34f62642013-10-09 02:07:00 -0500880{
881 int rc;
882 __le16 srch_path = 0; /* Null - open root of share */
883 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
884 struct cifs_open_parms oparms;
885 struct cifs_fid fid;
886
887 oparms.tcon = tcon;
888 oparms.desired_access = FILE_READ_ATTRIBUTES;
889 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200890 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French34f62642013-10-09 02:07:00 -0500891 oparms.fid = &fid;
892 oparms.reconnect = false;
893
Aurelien Aptel69dda302020-03-02 17:53:22 +0100894 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
895 NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500896 if (rc)
897 return;
898
Steven French21671142013-10-09 13:36:35 -0500899 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
900 FS_ATTRIBUTE_INFORMATION);
901 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
902 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500903 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -0500904}
905
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400906static int
907smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
908 struct cifs_sb_info *cifs_sb, const char *full_path)
909{
910 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400911 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700912 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400913 struct cifs_open_parms oparms;
914 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400915
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000916 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -0500917 return 0;
918
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400919 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
920 if (!utf16_path)
921 return -ENOMEM;
922
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400923 oparms.tcon = tcon;
924 oparms.desired_access = FILE_READ_ATTRIBUTES;
925 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200926 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400927 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400928 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400929
Aurelien Aptel69dda302020-03-02 17:53:22 +0100930 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
931 NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400932 if (rc) {
933 kfree(utf16_path);
934 return rc;
935 }
936
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400937 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400938 kfree(utf16_path);
939 return rc;
940}
941
/*
 * Return the server-assigned unique id (inode number) for a path.
 * The id was already fetched into @data by an earlier query, so this
 * only extracts the IndexNumber field; it always succeeds.
 */
static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}
950
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700951static int
952smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
953 struct cifs_fid *fid, FILE_ALL_INFO *data)
954{
955 int rc;
956 struct smb2_file_all_info *smb2_data;
957
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +0400958 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700959 GFP_KERNEL);
960 if (smb2_data == NULL)
961 return -ENOMEM;
962
963 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
964 smb2_data);
965 if (!rc)
966 move_smb2_info_to_cifs(data, smb2_data);
967 kfree(smb2_data);
968 return rc;
969}
970
Arnd Bergmann1368f152017-09-05 11:24:15 +0200971#ifdef CONFIG_CIFS_XATTR
/*
 * Walk an SMB2 FILE_FULL_EA_INFORMATION list received from the server
 * and convert it into the Linux xattr format expected by the VFS.
 *
 * Two modes:
 *  - @ea_name set (getxattr): copy that EA's value into @dst and return
 *    its length; return -ENODATA if not found, -ERANGE if @dst_size is
 *    too small (a @dst_size of 0 returns the required length).
 *  - @ea_name NULL (listxattr): emit each name as "user.<name>\0" into
 *    @dst and return the total bytes used; with @dst_size 0 only the
 *    required size is computed.
 *
 * @src/@src_size describe the raw response buffer; every entry is
 * bounds-checked against @src_size before being dereferenced.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	/* remember the caller's size: 0 means "size query" (no copying) */
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		/* value starts right after the NUL-terminated name */
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		/* 8 == fixed header preceding the name/value payload */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			/* getxattr: looking for one specific EA */
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1054
/*
 * Query extended attributes of @path using a compounded
 * open/query-info/close request.  If @ea_name is set, return only that
 * EA's value in @ea_data (getxattr); otherwise list all EA names
 * (listxattr).  Returns bytes used/needed or a negative errno.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* cap the reply so it fits alongside the create/close responses */
	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	/* validate the returned buffer before trusting offset/length */
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
			le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1109
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001110
1111static int
1112smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1113 const char *path, const char *ea_name, const void *ea_value,
1114 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1115 struct cifs_sb_info *cifs_sb)
1116{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001117 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001118 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001119 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001120 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001121 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001122 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001123 struct smb_rqst rqst[3];
1124 int resp_buftype[3];
1125 struct kvec rsp_iov[3];
1126 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1127 struct cifs_open_parms oparms;
1128 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1129 struct cifs_fid fid;
1130 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1131 unsigned int size[1];
1132 void *data[1];
1133 struct smb2_file_full_ea_info *ea = NULL;
1134 struct kvec close_iov[1];
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001135 struct smb2_query_info_rsp *rsp;
1136 int rc, used_len = 0;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001137
1138 if (smb3_encryption_required(tcon))
1139 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001140
1141 if (ea_name_len > 255)
1142 return -EINVAL;
1143
1144 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1145 if (!utf16_path)
1146 return -ENOMEM;
1147
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001148 memset(rqst, 0, sizeof(rqst));
1149 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1150 memset(rsp_iov, 0, sizeof(rsp_iov));
1151
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001152 if (ses->server->ops->query_all_EAs) {
1153 if (!ea_value) {
1154 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1155 ea_name, NULL, 0,
1156 cifs_sb);
1157 if (rc == -ENODATA)
1158 goto sea_exit;
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001159 } else {
1160 /* If we are adding a attribute we should first check
1161 * if there will be enough space available to store
1162 * the new EA. If not we should not add it since we
1163 * would not be able to even read the EAs back.
1164 */
1165 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1166 FILE_READ_EA,
1167 FILE_FULL_EA_INFORMATION,
1168 SMB2_O_INFO_FILE,
1169 CIFSMaxBufSize -
1170 MAX_SMB2_CREATE_RESPONSE_SIZE -
1171 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1172 &rsp_iov[1], &resp_buftype[1], cifs_sb);
1173 if (rc == 0) {
1174 rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1175 used_len = le32_to_cpu(rsp->OutputBufferLength);
1176 }
1177 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1178 resp_buftype[1] = CIFS_NO_BUFFER;
1179 memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1180 rc = 0;
1181
1182 /* Use a fudge factor of 256 bytes in case we collide
1183 * with a different set_EAs command.
1184 */
1185 if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1186 MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1187 used_len + ea_name_len + ea_value_len + 1) {
1188 rc = -ENOSPC;
1189 goto sea_exit;
1190 }
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001191 }
1192 }
1193
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001194 /* Open */
1195 memset(&open_iov, 0, sizeof(open_iov));
1196 rqst[0].rq_iov = open_iov;
1197 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1198
1199 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001200 oparms.tcon = tcon;
1201 oparms.desired_access = FILE_WRITE_EA;
1202 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001203 oparms.create_options = cifs_create_options(cifs_sb, 0);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001204 oparms.fid = &fid;
1205 oparms.reconnect = false;
1206
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001207 rc = SMB2_open_init(tcon, server,
1208 &rqst[0], &oplock, &oparms, utf16_path);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001209 if (rc)
1210 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001211 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001212
1213
1214 /* Set Info */
1215 memset(&si_iov, 0, sizeof(si_iov));
1216 rqst[1].rq_iov = si_iov;
1217 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001218
Vladimir Zapolskiy64b7f672020-10-10 21:25:54 +03001219 len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001220 ea = kzalloc(len, GFP_KERNEL);
1221 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001222 rc = -ENOMEM;
1223 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001224 }
1225
1226 ea->ea_name_length = ea_name_len;
1227 ea->ea_value_length = cpu_to_le16(ea_value_len);
1228 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1229 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1230
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001231 size[0] = len;
1232 data[0] = ea;
1233
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001234 rc = SMB2_set_info_init(tcon, server,
1235 &rqst[1], COMPOUND_FID,
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001236 COMPOUND_FID, current->tgid,
1237 FILE_FULL_EA_INFORMATION,
1238 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001239 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001240 smb2_set_related(&rqst[1]);
1241
1242
1243 /* Close */
1244 memset(&close_iov, 0, sizeof(close_iov));
1245 rqst[2].rq_iov = close_iov;
1246 rqst[2].rq_nvec = 1;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001247 rc = SMB2_close_init(tcon, server,
1248 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001249 smb2_set_related(&rqst[2]);
1250
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001251 rc = compound_send_recv(xid, ses, server,
1252 flags, 3, rqst,
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001253 resp_buftype, rsp_iov);
Steve Frenchd2f15422019-09-22 00:55:46 -05001254 /* no need to bump num_remote_opens because handle immediately closed */
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001255
1256 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001257 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001258 kfree(utf16_path);
1259 SMB2_open_free(&rqst[0]);
1260 SMB2_set_info_free(&rqst[1]);
1261 SMB2_close_free(&rqst[2]);
1262 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1263 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1264 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001265 return rc;
1266}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001267#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001268
/* Report whether echo requests may currently be sent on this connection */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1274
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001275static void
1276smb2_clear_stats(struct cifs_tcon *tcon)
1277{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001278 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001279
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001280 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1281 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1282 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1283 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001284}
1285
/*
 * Print the share's capability bits, sector/alignment flags, share
 * flags, tid and maximal access to a seq_file (for /proc debug output).
 */
static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	/* sector size / alignment flags from FS_SECTOR_SIZE_INFORMATION */
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}
1318
/*
 * Print per-tcon SMB2 statistics (bytes transferred, open handle counts
 * and per-command sent/failed totals) to a seq_file for /proc output.
 */
static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;

	/*
	 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
	 * totals (requests sent) since those SMBs are per-session not per tcon
	 */
	seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
		   (long long)(tcon->bytes_read),
		   (long long)(tcon->bytes_written));
	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
		   atomic_read(&tcon->num_local_opens),
		   atomic_read(&tcon->num_remote_opens));
	seq_printf(m, "\nTreeConnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
	seq_printf(m, "\nCreates: %d total %d failed",
		   atomic_read(&sent[SMB2_CREATE_HE]),
		   atomic_read(&failed[SMB2_CREATE_HE]));
	seq_printf(m, "\nCloses: %d total %d failed",
		   atomic_read(&sent[SMB2_CLOSE_HE]),
		   atomic_read(&failed[SMB2_CLOSE_HE]));
	seq_printf(m, "\nFlushes: %d total %d failed",
		   atomic_read(&sent[SMB2_FLUSH_HE]),
		   atomic_read(&failed[SMB2_FLUSH_HE]));
	seq_printf(m, "\nReads: %d total %d failed",
		   atomic_read(&sent[SMB2_READ_HE]),
		   atomic_read(&failed[SMB2_READ_HE]));
	seq_printf(m, "\nWrites: %d total %d failed",
		   atomic_read(&sent[SMB2_WRITE_HE]),
		   atomic_read(&failed[SMB2_WRITE_HE]));
	seq_printf(m, "\nLocks: %d total %d failed",
		   atomic_read(&sent[SMB2_LOCK_HE]),
		   atomic_read(&failed[SMB2_LOCK_HE]));
	seq_printf(m, "\nIOCTLs: %d total %d failed",
		   atomic_read(&sent[SMB2_IOCTL_HE]),
		   atomic_read(&failed[SMB2_IOCTL_HE]));
	seq_printf(m, "\nQueryDirectories: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
	seq_printf(m, "\nChangeNotifies: %d total %d failed",
		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
	seq_printf(m, "\nQueryInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
	seq_printf(m, "\nSetInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_SET_INFO_HE]),
		   atomic_read(&failed[SMB2_SET_INFO_HE]));
	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
}
1378
/*
 * Record the server-returned open handle in the per-open cifsFileInfo
 * after a successful SMB2 create, and translate the granted oplock or
 * lease level into the inode's caching state.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
	cfile->fid.access = fid->access;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	/* dialect-specific hook: maps oplock/lease bits to cache flags */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1396
/* Close the server handle identified by @fid; the result is ignored */
static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1403
/*
 * Close the file and use the attributes the server returns in the close
 * response to refresh the cached inode timestamps and block count,
 * avoiding a separate query.  On close failure the inode is untouched.
 */
static void
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
			  cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);

	/*
	 * i_blocks is not related to (i_size / i_blksize),
	 * but instead 512 byte (2**9) size is required for
	 * calculating num blocks.
	 */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
}
1442
/*
 * Request a resume key for server-side copy (FSCTL_SRV_REQUEST_RESUME_KEY)
 * on the given open handle and copy it into @pcchunk->SourceKey.
 * Returns 0 on success or a negative errno.
 */
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	/* make sure the response holds a full resume key before copying */
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}
1472
/*
 * Scratch request/response vectors used by smb2_ioctl_query_info()'s
 * compounded open/operation/close request; grouped into one struct so
 * they can be heap-allocated together and keep the stack frame small.
 */
struct iqi_vars {
	struct smb_rqst rqst[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	struct kvec close_iov[1];
};
1482
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001483static int
1484smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001485 struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02001486 struct cifs_sb_info *cifs_sb,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001487 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001488 unsigned long p)
1489{
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001490 struct iqi_vars *vars;
1491 struct smb_rqst *rqst;
1492 struct kvec *rsp_iov;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001493 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001494 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001495 char __user *arg = (char __user *)p;
1496 struct smb_query_info qi;
1497 struct smb_query_info __user *pqi;
1498 int rc = 0;
1499 int flags = 0;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001500 struct smb2_query_info_rsp *qi_rsp = NULL;
1501 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001502 void *buffer = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001503 int resp_buftype[3];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001504 struct cifs_open_parms oparms;
1505 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1506 struct cifs_fid fid;
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001507 unsigned int size[2];
1508 void *data[2];
Amir Goldstein0f060932020-02-03 21:46:43 +02001509 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001510
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001511 vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
1512 if (vars == NULL)
1513 return -ENOMEM;
1514 rqst = &vars->rqst[0];
1515 rsp_iov = &vars->rsp_iov[0];
1516
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001517 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001518
1519 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001520 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001521
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001522 if (qi.output_buffer_length > 1024) {
1523 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001524 return -EINVAL;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001525 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001526
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001527 if (!ses || !server) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001528 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001529 return -EIO;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001530 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001531
1532 if (smb3_encryption_required(tcon))
1533 flags |= CIFS_TRANSFORM_REQ;
1534
Markus Elfringcfaa1182019-11-05 21:30:25 +01001535 buffer = memdup_user(arg + sizeof(struct smb_query_info),
1536 qi.output_buffer_length);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001537 if (IS_ERR(buffer)) {
1538 kfree(vars);
Markus Elfringcfaa1182019-11-05 21:30:25 +01001539 return PTR_ERR(buffer);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001540 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001541
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001542 /* Open */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001543 rqst[0].rq_iov = &vars->open_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001544 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001545
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001546 memset(&oparms, 0, sizeof(oparms));
1547 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001548 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001549 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001550 oparms.fid = &fid;
1551 oparms.reconnect = false;
1552
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001553 if (qi.flags & PASSTHRU_FSCTL) {
1554 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1555 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1556 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001557 break;
1558 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1559 oparms.desired_access = GENERIC_ALL;
1560 break;
1561 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1562 oparms.desired_access = GENERIC_READ;
1563 break;
1564 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1565 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001566 break;
1567 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001568 } else if (qi.flags & PASSTHRU_SET_INFO) {
1569 oparms.desired_access = GENERIC_WRITE;
1570 } else {
1571 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001572 }
1573
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001574 rc = SMB2_open_init(tcon, server,
1575 &rqst[0], &oplock, &oparms, path);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001576 if (rc)
1577 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001578 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001579
1580 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001581 if (qi.flags & PASSTHRU_FSCTL) {
1582 /* Can eventually relax perm check since server enforces too */
1583 if (!capable(CAP_SYS_ADMIN))
1584 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001585 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001586 rqst[1].rq_iov = &vars->io_iov[0];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001587 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1588
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001589 rc = SMB2_ioctl_init(tcon, server,
1590 &rqst[1],
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001591 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001592 qi.info_type, true, buffer,
1593 qi.output_buffer_length,
Ronnie Sahlberg731b82b2020-01-08 13:08:07 +10001594 CIFSMaxBufSize -
1595 MAX_SMB2_CREATE_RESPONSE_SIZE -
1596 MAX_SMB2_CLOSE_RESPONSE_SIZE);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001597 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001598 } else if (qi.flags == PASSTHRU_SET_INFO) {
1599 /* Can eventually relax perm check since server enforces too */
1600 if (!capable(CAP_SYS_ADMIN))
1601 rc = -EPERM;
1602 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001603 rqst[1].rq_iov = &vars->si_iov[0];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001604 rqst[1].rq_nvec = 1;
1605
1606 size[0] = 8;
1607 data[0] = buffer;
1608
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001609 rc = SMB2_set_info_init(tcon, server,
1610 &rqst[1],
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001611 COMPOUND_FID, COMPOUND_FID,
1612 current->tgid,
1613 FILE_END_OF_FILE_INFORMATION,
1614 SMB2_O_INFO_FILE, 0, data, size);
1615 }
Steve French31ba4332019-03-13 02:40:07 -05001616 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001617 rqst[1].rq_iov = &vars->qi_iov[0];
Steve French31ba4332019-03-13 02:40:07 -05001618 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001619
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001620 rc = SMB2_query_info_init(tcon, server,
1621 &rqst[1], COMPOUND_FID,
Steve French31ba4332019-03-13 02:40:07 -05001622 COMPOUND_FID, qi.file_info_class,
1623 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001624 qi.input_buffer_length,
1625 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001626 } else { /* unknown flags */
Joe Perchesa0a30362020-04-14 22:42:53 -07001627 cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
1628 qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001629 rc = -EINVAL;
1630 }
1631
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001632 if (rc)
1633 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001634 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001635 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001636
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001637 /* Close */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001638 rqst[2].rq_iov = &vars->close_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001639 rqst[2].rq_nvec = 1;
1640
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001641 rc = SMB2_close_init(tcon, server,
1642 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001643 if (rc)
1644 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001645 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001646
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001647 rc = compound_send_recv(xid, ses, server,
1648 flags, 3, rqst,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001649 resp_buftype, rsp_iov);
1650 if (rc)
1651 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001652
1653 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001654 if (qi.flags & PASSTHRU_FSCTL) {
1655 pqi = (struct smb_query_info __user *)arg;
1656 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1657 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1658 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001659 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001660 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1661 > rsp_iov[1].iov_len)
1662 goto e_fault;
1663
1664 if (copy_to_user(&pqi->input_buffer_length,
1665 &qi.input_buffer_length,
1666 sizeof(qi.input_buffer_length)))
1667 goto e_fault;
1668
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001669 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1670 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001671 qi.input_buffer_length))
1672 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001673 } else {
1674 pqi = (struct smb_query_info __user *)arg;
1675 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1676 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1677 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001678 if (copy_to_user(&pqi->input_buffer_length,
1679 &qi.input_buffer_length,
1680 sizeof(qi.input_buffer_length)))
1681 goto e_fault;
1682
1683 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1684 qi.input_buffer_length))
1685 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001686 }
1687
1688 iqinf_exit:
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001689 kfree(vars);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001690 kfree(buffer);
1691 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001692 if (qi.flags & PASSTHRU_FSCTL)
1693 SMB2_ioctl_free(&rqst[1]);
1694 else
1695 SMB2_query_info_free(&rqst[1]);
1696
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001697 SMB2_close_free(&rqst[2]);
1698 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1699 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1700 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001701 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001702
1703e_fault:
1704 rc = -EFAULT;
1705 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001706}
1707
Sachin Prabhu620d8742017-02-10 16:03:51 +05301708static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001709smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001710 struct cifsFileInfo *srcfile,
1711 struct cifsFileInfo *trgtfile, u64 src_off,
1712 u64 len, u64 dest_off)
1713{
1714 int rc;
1715 unsigned int ret_data_len;
1716 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001717 struct copychunk_ioctl_rsp *retbuf = NULL;
1718 struct cifs_tcon *tcon;
1719 int chunks_copied = 0;
1720 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301721 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001722
1723 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1724
1725 if (pcchunk == NULL)
1726 return -ENOMEM;
1727
Christoph Probsta205d502019-05-08 21:36:25 +02001728 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001729 /* Request a key from the server to identify the source of the copy */
1730 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1731 srcfile->fid.persistent_fid,
1732 srcfile->fid.volatile_fid, pcchunk);
1733
1734 /* Note: request_res_key sets res_key null only if rc !=0 */
1735 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001736 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001737
1738 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001739 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001740 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001741 pcchunk->Reserved2 = 0;
1742
Steve French9bf0c9c2013-11-16 18:05:28 -06001743 tcon = tlink_tcon(trgtfile->tlink);
1744
1745 while (len > 0) {
1746 pcchunk->SourceOffset = cpu_to_le64(src_off);
1747 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1748 pcchunk->Length =
1749 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1750
1751 /* Request server copy to target from src identified by key */
1752 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001753 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001754 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001755 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1756 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001757 if (rc == 0) {
1758 if (ret_data_len !=
1759 sizeof(struct copychunk_ioctl_rsp)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001760 cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001761 rc = -EIO;
1762 goto cchunk_out;
1763 }
1764 if (retbuf->TotalBytesWritten == 0) {
1765 cifs_dbg(FYI, "no bytes copied\n");
1766 rc = -EIO;
1767 goto cchunk_out;
1768 }
1769 /*
1770 * Check if server claimed to write more than we asked
1771 */
1772 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1773 le32_to_cpu(pcchunk->Length)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001774 cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001775 rc = -EIO;
1776 goto cchunk_out;
1777 }
1778 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001779 cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001780 rc = -EIO;
1781 goto cchunk_out;
1782 }
1783 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001784
Sachin Prabhu620d8742017-02-10 16:03:51 +05301785 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1786 src_off += bytes_written;
1787 dest_off += bytes_written;
1788 len -= bytes_written;
1789 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001790
Sachin Prabhu620d8742017-02-10 16:03:51 +05301791 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001792 le32_to_cpu(retbuf->ChunksWritten),
1793 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301794 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001795 } else if (rc == -EINVAL) {
1796 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1797 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001798
Steve French9bf0c9c2013-11-16 18:05:28 -06001799 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1800 le32_to_cpu(retbuf->ChunksWritten),
1801 le32_to_cpu(retbuf->ChunkBytesWritten),
1802 le32_to_cpu(retbuf->TotalBytesWritten));
1803
1804 /*
1805 * Check if this is the first request using these sizes,
1806 * (ie check if copy succeed once with original sizes
1807 * and check if the server gave us different sizes after
1808 * we already updated max sizes on previous request).
1809 * if not then why is the server returning an error now
1810 */
1811 if ((chunks_copied != 0) || chunk_sizes_updated)
1812 goto cchunk_out;
1813
1814 /* Check that server is not asking us to grow size */
1815 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1816 tcon->max_bytes_chunk)
1817 tcon->max_bytes_chunk =
1818 le32_to_cpu(retbuf->ChunkBytesWritten);
1819 else
1820 goto cchunk_out; /* server gave us bogus size */
1821
1822 /* No need to change MaxChunks since already set to 1 */
1823 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001824 } else
1825 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001826 }
1827
1828cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001829 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001830 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301831 if (rc)
1832 return rc;
1833 else
1834 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001835}
1836
1837static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001838smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1839 struct cifs_fid *fid)
1840{
1841 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1842}
1843
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001844static unsigned int
1845smb2_read_data_offset(char *buf)
1846{
1847 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001848
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001849 return rsp->DataOffset;
1850}
1851
1852static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001853smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001854{
1855 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001856
1857 if (in_remaining)
1858 return le32_to_cpu(rsp->DataRemaining);
1859
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001860 return le32_to_cpu(rsp->DataLength);
1861}
1862
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001863
1864static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001865smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001866 struct cifs_io_parms *parms, unsigned int *bytes_read,
1867 char **buf, int *buf_type)
1868{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001869 parms->persistent_fid = pfid->persistent_fid;
1870 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001871 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1872}
1873
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001874static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001875smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001876 struct cifs_io_parms *parms, unsigned int *written,
1877 struct kvec *iov, unsigned long nr_segs)
1878{
1879
Steve Frenchdb8b6312014-09-22 05:13:55 -05001880 parms->persistent_fid = pfid->persistent_fid;
1881 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001882 return SMB2_write(xid, parms, written, iov, nr_segs);
1883}
1884
Steve Frenchd43cc792014-08-13 17:16:29 -05001885/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1886static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1887 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1888{
1889 struct cifsInodeInfo *cifsi;
1890 int rc;
1891
1892 cifsi = CIFS_I(inode);
1893
1894 /* if file already sparse don't bother setting sparse again */
1895 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1896 return true; /* already sparse */
1897
1898 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1899 return true; /* already not sparse */
1900
1901 /*
1902 * Can't check for sparse support on share the usual way via the
1903 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1904 * since Samba server doesn't set the flag on the share, yet
1905 * supports the set sparse FSCTL and returns sparse correctly
1906 * in the file attributes. If we fail setting sparse though we
1907 * mark that server does not support sparse files for this share
1908 * to avoid repeatedly sending the unsupported fsctl to server
1909 * if the file is repeatedly extended.
1910 */
1911 if (tcon->broken_sparse_sup)
1912 return false;
1913
1914 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1915 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001916 true /* is_fctl */,
Steve French153322f2019-03-28 22:32:49 -05001917 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001918 if (rc) {
1919 tcon->broken_sparse_sup = true;
1920 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1921 return false;
1922 }
1923
1924 if (setsparse)
1925 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1926 else
1927 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1928
1929 return true;
1930}
1931
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001932static int
1933smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1934 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1935{
1936 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001937 struct inode *inode;
1938
1939 /*
1940 * If extending file more than one page make sparse. Many Linux fs
1941 * make files sparse by default when extending via ftruncate
1942 */
David Howells2b0143b2015-03-17 22:25:59 +00001943 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001944
1945 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001946 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001947
Steve Frenchd43cc792014-08-13 17:16:29 -05001948 /* whether set sparse succeeds or not, extend the file */
1949 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001950 }
1951
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001952 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001953 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001954}
1955
Steve French02b16662015-06-27 21:18:36 -07001956static int
1957smb2_duplicate_extents(const unsigned int xid,
1958 struct cifsFileInfo *srcfile,
1959 struct cifsFileInfo *trgtfile, u64 src_off,
1960 u64 len, u64 dest_off)
1961{
1962 int rc;
1963 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001964 struct duplicate_extents_to_file dup_ext_buf;
1965 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1966
1967 /* server fileays advertise duplicate extent support with this flag */
1968 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1969 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1970 return -EOPNOTSUPP;
1971
1972 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1973 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1974 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1975 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1976 dup_ext_buf.ByteCount = cpu_to_le64(len);
Christoph Probsta205d502019-05-08 21:36:25 +02001977 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
Steve French02b16662015-06-27 21:18:36 -07001978 src_off, dest_off, len);
1979
1980 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1981 if (rc)
1982 goto duplicate_extents_out;
1983
1984 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1985 trgtfile->fid.volatile_fid,
1986 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001987 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001988 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001989 sizeof(struct duplicate_extents_to_file),
Steve French153322f2019-03-28 22:32:49 -05001990 CIFSMaxBufSize, NULL,
Steve French02b16662015-06-27 21:18:36 -07001991 &ret_data_len);
1992
1993 if (ret_data_len > 0)
Christoph Probsta205d502019-05-08 21:36:25 +02001994 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
Steve French02b16662015-06-27 21:18:36 -07001995
1996duplicate_extents_out:
1997 return rc;
1998}
Steve French02b16662015-06-27 21:18:36 -07001999
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002000static int
Steve French64a5cfa2013-10-14 15:31:32 -05002001smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2002 struct cifsFileInfo *cfile)
2003{
2004 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
2005 cfile->fid.volatile_fid);
2006}
2007
2008static int
Steve Frenchb3152e22015-06-24 03:17:02 -05002009smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2010 struct cifsFileInfo *cfile)
2011{
2012 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05002013 unsigned int ret_data_len;
2014
2015 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
2016 integr_info.Flags = 0;
2017 integr_info.Reserved = 0;
2018
2019 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2020 cfile->fid.volatile_fid,
2021 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002022 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01002023 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05002024 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05002025 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05002026 &ret_data_len);
2027
2028}
2029
Steve Frenche02789a2018-08-09 14:33:12 -05002030/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2031#define GMT_TOKEN_SIZE 50
2032
Steve French153322f2019-03-28 22:32:49 -05002033#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2034
Steve Frenche02789a2018-08-09 14:33:12 -05002035/*
2036 * Input buffer contains (empty) struct smb_snapshot array with size filled in
2037 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2038 */
Steve Frenchb3152e22015-06-24 03:17:02 -05002039static int
Steve French834170c2016-09-30 21:14:26 -05002040smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2041 struct cifsFileInfo *cfile, void __user *ioc_buf)
2042{
2043 char *retbuf = NULL;
2044 unsigned int ret_data_len = 0;
2045 int rc;
Steve French153322f2019-03-28 22:32:49 -05002046 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05002047 struct smb_snapshot_array snapshot_in;
2048
Steve French973189a2019-04-04 00:41:04 -05002049 /*
2050 * On the first query to enumerate the list of snapshots available
2051 * for this volume the buffer begins with 0 (number of snapshots
2052 * which can be returned is zero since at that point we do not know
2053 * how big the buffer needs to be). On the second query,
2054 * it (ret_data_len) is set to number of snapshots so we can
2055 * know to set the maximum response size larger (see below).
2056 */
Steve French153322f2019-03-28 22:32:49 -05002057 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
2058 return -EFAULT;
2059
2060 /*
2061 * Note that for snapshot queries that servers like Azure expect that
2062 * the first query be minimal size (and just used to get the number/size
2063 * of previous versions) so response size must be specified as EXACTLY
2064 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
2065 * of eight bytes.
2066 */
2067 if (ret_data_len == 0)
2068 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
2069 else
2070 max_response_size = CIFSMaxBufSize;
2071
Steve French834170c2016-09-30 21:14:26 -05002072 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2073 cfile->fid.volatile_fid,
2074 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002075 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002076 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05002077 (char **)&retbuf,
2078 &ret_data_len);
2079 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
2080 rc, ret_data_len);
2081 if (rc)
2082 return rc;
2083
2084 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
2085 /* Fixup buffer */
2086 if (copy_from_user(&snapshot_in, ioc_buf,
2087 sizeof(struct smb_snapshot_array))) {
2088 rc = -EFAULT;
2089 kfree(retbuf);
2090 return rc;
2091 }
Steve French834170c2016-09-30 21:14:26 -05002092
Steve Frenche02789a2018-08-09 14:33:12 -05002093 /*
2094 * Check for min size, ie not large enough to fit even one GMT
2095 * token (snapshot). On the first ioctl some users may pass in
2096 * smaller size (or zero) to simply get the size of the array
2097 * so the user space caller can allocate sufficient memory
2098 * and retry the ioctl again with larger array size sufficient
2099 * to hold all of the snapshot GMT tokens on the second try.
2100 */
2101 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2102 ret_data_len = sizeof(struct smb_snapshot_array);
2103
2104 /*
2105 * We return struct SRV_SNAPSHOT_ARRAY, followed by
2106 * the snapshot array (of 50 byte GMT tokens) each
2107 * representing an available previous version of the data
2108 */
2109 if (ret_data_len > (snapshot_in.snapshot_array_size +
2110 sizeof(struct smb_snapshot_array)))
2111 ret_data_len = snapshot_in.snapshot_array_size +
2112 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05002113
2114 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2115 rc = -EFAULT;
2116 }
2117
2118 kfree(retbuf);
2119 return rc;
2120}
2121
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002122
2123
2124static int
2125smb3_notify(const unsigned int xid, struct file *pfile,
2126 void __user *ioc_buf)
2127{
2128 struct smb3_notify notify;
2129 struct dentry *dentry = pfile->f_path.dentry;
2130 struct inode *inode = file_inode(pfile);
2131 struct cifs_sb_info *cifs_sb;
2132 struct cifs_open_parms oparms;
2133 struct cifs_fid fid;
2134 struct cifs_tcon *tcon;
2135 unsigned char *path = NULL;
2136 __le16 *utf16_path = NULL;
2137 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2138 int rc = 0;
2139
2140 path = build_path_from_dentry(dentry);
2141 if (path == NULL)
2142 return -ENOMEM;
2143
2144 cifs_sb = CIFS_SB(inode->i_sb);
2145
2146 utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
2147 if (utf16_path == NULL) {
2148 rc = -ENOMEM;
2149 goto notify_exit;
2150 }
2151
2152 if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
2153 rc = -EFAULT;
2154 goto notify_exit;
2155 }
2156
2157 tcon = cifs_sb_master_tcon(cifs_sb);
2158 oparms.tcon = tcon;
Steve French4ef9b4f2020-07-07 18:08:46 -05002159 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002160 oparms.disposition = FILE_OPEN;
2161 oparms.create_options = cifs_create_options(cifs_sb, 0);
2162 oparms.fid = &fid;
2163 oparms.reconnect = false;
2164
Aurelien Aptel69dda302020-03-02 17:53:22 +01002165 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
2166 NULL);
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002167 if (rc)
2168 goto notify_exit;
2169
2170 rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
2171 notify.watch_tree, notify.completion_filter);
2172
2173 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2174
2175 cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
2176
2177notify_exit:
2178 kfree(path);
2179 kfree(utf16_path);
2180 return rc;
2181}
2182
/*
 * Open the directory @path and fetch the first batch of directory entries
 * in a single compounded round trip (SMB2 Create + Query Directory).
 *
 * On success the open handle is returned through @fid (the caller is
 * responsible for closing it) and the first entries are parsed into
 * @srch_inf.  @search_flags is accepted for ops-table compatibility but is
 * not used by this SMB2 implementation.
 *
 * Returns 0 on success (including an empty directory, which is signalled
 * via srch_inf->endOfSearch), negative errno otherwise.
 */
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *path, struct cifs_sb_info *cifs_sb,
		     struct cifs_fid *fid, __u16 search_flags,
		     struct cifs_search_info *srch_inf)
{
	__le16 *utf16_path;
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	int resp_buftype[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	int rc, flags = 0;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct smb2_query_directory_rsp *qd_rsp = NULL;
	struct smb2_create_rsp *op_rsp = NULL;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qdf_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query directory */
	srch_inf->entries_in_buffer = 0;
	/* NOTE(review): 2 presumably accounts for "." and ".." — confirm
	 * against the readdir bookkeeping in the generic cifs code. */
	srch_inf->index_of_last_entry = 2;

	memset(&qd_iov, 0, sizeof(qd_iov));
	rqst[1].rq_iov = qd_iov;
	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	/* COMPOUND_FID: the query uses the handle created by rqst[0] */
	rc = SMB2_query_directory_init(xid, tcon, server,
				       &rqst[1],
				       COMPOUND_FID, COMPOUND_FID,
				       0, srch_inf->info_level);
	if (rc)
		goto qdf_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);

	/* If the open failed there is nothing to do */
	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
		goto qdf_free;
	}
	/* Open succeeded — record the handle so we can close it on error */
	fid->persistent_fid = op_rsp->PersistentFileId;
	fid->volatile_fid = op_rsp->VolatileFileId;

	/* Anything else than ENODATA means a genuine error */
	if (rc && rc != -ENODATA) {
		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
		trace_smb3_query_dir_err(xid, fid->persistent_fid,
					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}

	atomic_inc(&tcon->num_remote_opens);

	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
	if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
		/* Empty directory: success, nothing to parse */
		trace_smb3_query_dir_done(xid, fid->persistent_fid,
					  tcon->tid, tcon->ses->Suid, 0, 0);
		srch_inf->endOfSearch = true;
		rc = 0;
		goto qdf_free;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
					 tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}
	/*
	 * The response buffer presumably now backs srch_inf; mark it
	 * CIFS_NO_BUFFER so free_rsp_buf() below does not release it.
	 */
	resp_buftype[1] = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
				  tcon->ses->Suid, 0, srch_inf->entries_in_buffer);

 qdf_free:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_query_directory_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
2301
2302static int
2303smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2304 struct cifs_fid *fid, __u16 search_flags,
2305 struct cifs_search_info *srch_inf)
2306{
2307 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2308 fid->volatile_fid, 0, srch_inf);
2309}
2310
2311static int
2312smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2313 struct cifs_fid *fid)
2314{
2315 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2316}
2317
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002318/*
Christoph Probsta205d502019-05-08 21:36:25 +02002319 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2320 * the number of credits and return true. Otherwise - return false.
2321 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002322static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002323smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002324{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002325 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002326
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002327 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002328 return false;
2329
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002330 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002331 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002332 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002333 spin_unlock(&server->req_lock);
2334 wake_up(&server->request_q);
2335 }
2336
2337 return true;
2338}
2339
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002340static bool
2341smb2_is_session_expired(char *buf)
2342{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002343 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002344
Mark Symsd81243c2018-05-24 09:47:31 +01002345 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2346 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002347 return false;
2348
Steve Frenche68a9322018-07-30 14:23:58 -05002349 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2350 le16_to_cpu(shdr->Command),
2351 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002352 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002353
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002354 return true;
2355}
2356
Rohith Surabattula8e670f72020-09-18 05:37:28 +00002357static bool
2358smb2_is_status_io_timeout(char *buf)
2359{
2360 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
2361
2362 if (shdr->Status == STATUS_IO_TIMEOUT)
2363 return true;
2364 else
2365 return false;
2366}
2367
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002368static int
2369smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2370 struct cifsInodeInfo *cinode)
2371{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002372 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2373 return SMB2_lease_break(0, tcon, cinode->lease_key,
2374 smb2_get_lease_state(cinode));
2375
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002376 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2377 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002378 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002379}
2380
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002381void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002382smb2_set_related(struct smb_rqst *rqst)
2383{
2384 struct smb2_sync_hdr *shdr;
2385
2386 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002387 if (shdr == NULL) {
2388 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2389 return;
2390 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002391 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2392}
2393
/* Shared zero source used by smb2_set_next_command() to pad compounded
 * requests out to 8-byte alignment */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2395
/*
 * Mark @rqst as a non-final request of a compound chain: pad it out to
 * 8-byte alignment and set the header's NextCommand to its padded length
 * so the server can locate the next request in the chain.
 *
 * NOTE(review): the unencrypted path writes one extra iov past rq_nvec,
 * and the encrypted path appends data after iov[0] — both assume the
 * caller sized the iov array/buffer with room to spare; confirm against
 * the callers' iov allocations.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2452
/*
 * Issue a compounded Create/QueryInfo/Close for @utf16_path in a single
 * round trip.
 *
 * Passes the query info response back to the caller on success through
 * @rsp/@buftype.  The caller needs to free it with free_rsp_buf().
 *
 * @desired_access: access mask for the transient open
 * @class/@type/@output_len: QUERY_INFO InfoType/FileInfoClass/buffer size
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query info — COMPOUND_FID refers back to the handle from rqst[0] */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close the transient handle in the same compound */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			/* share was removed/replaced on the server */
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->treeName);
		}
		goto qic_exit;
	}
	/* hand the query-info response (and its ownership) to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2549
2550static int
2551smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002552 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002553{
2554 struct smb2_query_info_rsp *rsp;
2555 struct smb2_fs_full_size_info *info = NULL;
2556 __le16 utf16_path = 0; /* Null - open root of share */
2557 struct kvec rsp_iov = {NULL, 0};
2558 int buftype = CIFS_NO_BUFFER;
2559 int rc;
2560
2561
2562 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2563 FILE_READ_ATTRIBUTES,
2564 FS_FULL_SIZE_INFORMATION,
2565 SMB2_O_INFO_FILESYSTEM,
2566 sizeof(struct smb2_fs_full_size_info),
Steve French87f93d82020-02-04 13:02:59 -06002567 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002568 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002569 goto qfs_exit;
2570
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002571 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002572 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002573 info = (struct smb2_fs_full_size_info *)(
2574 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2575 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2576 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002577 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002578 sizeof(struct smb2_fs_full_size_info));
2579 if (!rc)
2580 smb2_copy_fs_info_to_kstatfs(info, buf);
2581
2582qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002583 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002584 return rc;
2585}
2586
Steve French2d304212018-06-24 23:28:12 -05002587static int
2588smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002589 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Steve French2d304212018-06-24 23:28:12 -05002590{
2591 int rc;
2592 __le16 srch_path = 0; /* Null - open root of share */
2593 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2594 struct cifs_open_parms oparms;
2595 struct cifs_fid fid;
2596
2597 if (!tcon->posix_extensions)
Amir Goldstein0f060932020-02-03 21:46:43 +02002598 return smb2_queryfs(xid, tcon, cifs_sb, buf);
Steve French2d304212018-06-24 23:28:12 -05002599
2600 oparms.tcon = tcon;
2601 oparms.desired_access = FILE_READ_ATTRIBUTES;
2602 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002603 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French2d304212018-06-24 23:28:12 -05002604 oparms.fid = &fid;
2605 oparms.reconnect = false;
2606
Aurelien Aptel69dda302020-03-02 17:53:22 +01002607 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
2608 NULL, NULL);
Steve French2d304212018-06-24 23:28:12 -05002609 if (rc)
2610 return rc;
2611
2612 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2613 fid.volatile_fid, buf);
2614 buf->f_type = SMB2_MAGIC_NUMBER;
2615 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2616 return rc;
2617}
Steve French2d304212018-06-24 23:28:12 -05002618
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002619static bool
2620smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2621{
2622 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2623 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2624}
2625
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002626static int
2627smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2628 __u64 length, __u32 type, int lock, int unlock, bool wait)
2629{
2630 if (unlock && !lock)
2631 type = SMB2_LOCKFLAG_UNLOCK;
2632 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2633 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2634 current->tgid, length, offset, type, wait);
2635}
2636
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002637static void
2638smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2639{
2640 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2641}
2642
2643static void
2644smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2645{
2646 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2647}
2648
/* Lease keys are chosen by the client; use a fresh random UUID */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2654
/*
 * Resolve a DFS referral for @search_name via FSCTL_DFS_GET_REFERRALS.
 *
 * The ioctl needs some tcon to ride on: prefer the session's IPC tcon,
 * otherwise borrow (and temporarily refcount) the first tcon of the
 * session.  On success *@target_nodes/*@num_of_nodes hold the parsed
 * referral list (caller frees).  Returns 0 or negative errno
 * (-ENOTCONN when no tcon is available at all).
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		/* pin the borrowed tcon; dropped in the out: path */
		if (tcon)
			tcon->tc_count++;
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	/* retry as long as the server asks us to via -EAGAIN */
	do {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
	} while (rc == -EAGAIN);

	if (rc) {
		/* -ENOENT/-EOPNOTSUPP are expected lookup outcomes: no VFS log */
		if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
			cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002749
2750static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002751parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2752 u32 plen, char **target_path,
2753 struct cifs_sb_info *cifs_sb)
2754{
2755 unsigned int len;
2756
2757 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2758 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2759
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002760 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2761 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2762 le64_to_cpu(symlink_buf->InodeType));
2763 return -EOPNOTSUPP;
2764 }
2765
2766 *target_path = cifs_strndup_from_utf16(
2767 symlink_buf->PathBuffer,
2768 len, true, cifs_sb->local_nls);
2769 if (!(*target_path))
2770 return -ENOMEM;
2771
2772 convert_delimiter(*target_path, '/');
2773 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2774
2775 return 0;
2776}
2777
/*
 * Parse a Windows Symbolic Link reparse buffer into the symlink target
 * path (the "substitute name").  Returns 0 and sets *target_path
 * (allocated; caller frees), -EIO for a malformed buffer, -ENOMEM on
 * allocation failure.
 */
static int
parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
		      u32 plen, char **target_path,
		      struct cifs_sb_info *cifs_sb)
{
	unsigned int sub_len;
	unsigned int sub_offset;

	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */

	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
	/*
	 * 20 is the fixed portion before PathBuffer per MS-FSCC 2.1.2.4:
	 * 8-byte reparse header (ReparseTag + ReparseDataLength + Reserved)
	 * plus SubstituteName/PrintName offset/length fields and Flags
	 * (2+2+2+2+4).  The substitute name must lie entirely within the
	 * @plen bytes actually received.
	 */
	if (sub_offset + 20 > plen ||
	    sub_offset + sub_len + 20 > plen) {
		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
		return -EIO;
	}

	/* target is UTF-16 with backslash separators; convert both */
	*target_path = cifs_strndup_from_utf16(
			symlink_buf->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path))
		return -ENOMEM;

	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	return 0;
}
2807
/*
 * Validate a reparse point buffer returned by the server and dispatch to
 * the tag-specific parser (NFS-style or Windows symlink) to extract the
 * symlink target.  Returns -EOPNOTSUPP for tags we do not understand.
 */
static int
parse_reparse_point(struct reparse_data_buffer *buf,
		    u32 plen, char **target_path,
		    struct cifs_sb_info *cifs_sb)
{
	/* must at least hold the 8-byte fixed reparse header */
	if (plen < sizeof(struct reparse_data_buffer)) {
		cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
			 plen);
		return -EIO;
	}

	/* the advertised data length must fit in what was received */
	if (plen < le16_to_cpu(buf->ReparseDataLength) +
	    sizeof(struct reparse_data_buffer)) {
		cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
			 plen);
		return -EIO;
	}

	/* See MS-FSCC 2.1.2 */
	switch (le32_to_cpu(buf->ReparseTag)) {
	case IO_REPARSE_TAG_NFS:
		return parse_reparse_posix(
			(struct reparse_posix_data *)buf,
			plen, target_path, cifs_sb);
	case IO_REPARSE_TAG_SYMLINK:
		return parse_reparse_symlink(
			(struct reparse_symlink_data_buffer *)buf,
			plen, target_path, cifs_sb);
	default:
		cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
			 le32_to_cpu(buf->ReparseTag));
		return -EOPNOTSUPP;
	}
}
2842
Pavel Shilovsky78932422016-07-24 10:37:38 +03002843#define SMB2_SYMLINK_STRUCT_SIZE \
2844 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2845
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002846static int
2847smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002848 struct cifs_sb_info *cifs_sb, const char *full_path,
2849 char **target_path, bool is_reparse_point)
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002850{
2851 int rc;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002852 __le16 *utf16_path = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002853 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2854 struct cifs_open_parms oparms;
2855 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002856 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002857 struct smb2_err_rsp *err_buf = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002858 struct smb2_symlink_err_rsp *symlink;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002859 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002860 unsigned int sub_len;
2861 unsigned int sub_offset;
2862 unsigned int print_len;
2863 unsigned int print_offset;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002864 int flags = 0;
2865 struct smb_rqst rqst[3];
2866 int resp_buftype[3];
2867 struct kvec rsp_iov[3];
2868 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2869 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
2870 struct kvec close_iov[1];
2871 struct smb2_create_rsp *create_rsp;
2872 struct smb2_ioctl_rsp *ioctl_rsp;
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002873 struct reparse_data_buffer *reparse_buf;
Amir Goldstein0f060932020-02-03 21:46:43 +02002874 int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002875 u32 plen;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002876
2877 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2878
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002879 *target_path = NULL;
2880
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002881 if (smb3_encryption_required(tcon))
2882 flags |= CIFS_TRANSFORM_REQ;
2883
2884 memset(rqst, 0, sizeof(rqst));
2885 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2886 memset(rsp_iov, 0, sizeof(rsp_iov));
2887
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002888 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2889 if (!utf16_path)
2890 return -ENOMEM;
2891
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002892 /* Open */
2893 memset(&open_iov, 0, sizeof(open_iov));
2894 rqst[0].rq_iov = open_iov;
2895 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2896
2897 memset(&oparms, 0, sizeof(oparms));
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002898 oparms.tcon = tcon;
2899 oparms.desired_access = FILE_READ_ATTRIBUTES;
2900 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002901 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002902 oparms.fid = &fid;
2903 oparms.reconnect = false;
2904
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002905 rc = SMB2_open_init(tcon, server,
2906 &rqst[0], &oplock, &oparms, utf16_path);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002907 if (rc)
2908 goto querty_exit;
2909 smb2_set_next_command(tcon, &rqst[0]);
2910
2911
2912 /* IOCTL */
2913 memset(&io_iov, 0, sizeof(io_iov));
2914 rqst[1].rq_iov = io_iov;
2915 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
2916
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002917 rc = SMB2_ioctl_init(tcon, server,
2918 &rqst[1], fid.persistent_fid,
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002919 fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
Ronnie Sahlberg731b82b2020-01-08 13:08:07 +10002920 true /* is_fctl */, NULL, 0,
2921 CIFSMaxBufSize -
2922 MAX_SMB2_CREATE_RESPONSE_SIZE -
2923 MAX_SMB2_CLOSE_RESPONSE_SIZE);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002924 if (rc)
2925 goto querty_exit;
2926
2927 smb2_set_next_command(tcon, &rqst[1]);
2928 smb2_set_related(&rqst[1]);
2929
2930
2931 /* Close */
2932 memset(&close_iov, 0, sizeof(close_iov));
2933 rqst[2].rq_iov = close_iov;
2934 rqst[2].rq_nvec = 1;
2935
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002936 rc = SMB2_close_init(tcon, server,
2937 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002938 if (rc)
2939 goto querty_exit;
2940
2941 smb2_set_related(&rqst[2]);
2942
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002943 rc = compound_send_recv(xid, tcon->ses, server,
2944 flags, 3, rqst,
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002945 resp_buftype, rsp_iov);
2946
2947 create_rsp = rsp_iov[0].iov_base;
2948 if (create_rsp && create_rsp->sync_hdr.Status)
2949 err_iov = rsp_iov[0];
2950 ioctl_rsp = rsp_iov[1].iov_base;
2951
2952 /*
2953 * Open was successful and we got an ioctl response.
2954 */
2955 if ((rc == 0) && (is_reparse_point)) {
2956 /* See MS-FSCC 2.3.23 */
2957
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002958 reparse_buf = (struct reparse_data_buffer *)
2959 ((char *)ioctl_rsp +
2960 le32_to_cpu(ioctl_rsp->OutputOffset));
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002961 plen = le32_to_cpu(ioctl_rsp->OutputCount);
2962
2963 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
2964 rsp_iov[1].iov_len) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002965 cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002966 plen);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002967 rc = -EIO;
2968 goto querty_exit;
2969 }
2970
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002971 rc = parse_reparse_point(reparse_buf, plen, target_path,
2972 cifs_sb);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002973 goto querty_exit;
2974 }
2975
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002976 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002977 rc = -ENOENT;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002978 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002979 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002980
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002981 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002982 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002983 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002984 rc = -EINVAL;
2985 goto querty_exit;
2986 }
2987
2988 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2989 if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
2990 le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
2991 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002992 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002993 }
2994
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002995 /* open must fail on symlink - reset rc */
2996 rc = 0;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002997 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2998 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002999 print_len = le16_to_cpu(symlink->PrintNameLength);
3000 print_offset = le16_to_cpu(symlink->PrintNameOffset);
3001
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003002 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10003003 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10003004 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03003005 }
3006
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003007 if (err_iov.iov_len <
3008 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10003009 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10003010 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03003011 }
3012
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003013 *target_path = cifs_strndup_from_utf16(
3014 (char *)symlink->PathBuffer + sub_offset,
3015 sub_len, true, cifs_sb->local_nls);
3016 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10003017 rc = -ENOMEM;
3018 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003019 }
3020 convert_delimiter(*target_path, '/');
3021 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10003022
3023 querty_exit:
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10003024 cifs_dbg(FYI, "query symlink rc %d\n", rc);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003025 kfree(utf16_path);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10003026 SMB2_open_free(&rqst[0]);
3027 SMB2_ioctl_free(&rqst[1]);
3028 SMB2_close_free(&rqst[2]);
3029 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
3030 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
3031 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003032 return rc;
3033}
3034
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003035static struct cifs_ntsd *
3036get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
3037 const struct cifs_fid *cifsfid, u32 *pacllen)
3038{
3039 struct cifs_ntsd *pntsd = NULL;
3040 unsigned int xid;
3041 int rc = -EOPNOTSUPP;
3042 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3043
3044 if (IS_ERR(tlink))
3045 return ERR_CAST(tlink);
3046
3047 xid = get_xid();
3048 cifs_dbg(FYI, "trying to get acl\n");
3049
3050 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
3051 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
3052 free_xid(xid);
3053
3054 cifs_put_tlink(tlink);
3055
3056 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3057 if (rc)
3058 return ERR_PTR(rc);
3059 return pntsd;
3060
3061}
3062
3063static struct cifs_ntsd *
3064get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
3065 const char *path, u32 *pacllen)
3066{
3067 struct cifs_ntsd *pntsd = NULL;
3068 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3069 unsigned int xid;
3070 int rc;
3071 struct cifs_tcon *tcon;
3072 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3073 struct cifs_fid fid;
3074 struct cifs_open_parms oparms;
3075 __le16 *utf16_path;
3076
3077 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
3078 if (IS_ERR(tlink))
3079 return ERR_CAST(tlink);
3080
3081 tcon = tlink_tcon(tlink);
3082 xid = get_xid();
3083
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003084 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05003085 if (!utf16_path) {
3086 rc = -ENOMEM;
3087 free_xid(xid);
3088 return ERR_PTR(rc);
3089 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003090
3091 oparms.tcon = tcon;
3092 oparms.desired_access = READ_CONTROL;
3093 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02003094 oparms.create_options = cifs_create_options(cifs_sb, 0);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003095 oparms.fid = &fid;
3096 oparms.reconnect = false;
3097
Aurelien Aptel69dda302020-03-02 17:53:22 +01003098 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
3099 NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003100 kfree(utf16_path);
3101 if (!rc) {
3102 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3103 fid.volatile_fid, (void **)&pntsd, pacllen);
3104 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3105 }
3106
3107 cifs_put_tlink(tlink);
3108 free_xid(xid);
3109
3110 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3111 if (rc)
3112 return ERR_PTR(rc);
3113 return pntsd;
3114}
3115
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003116static int
3117set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
3118 struct inode *inode, const char *path, int aclflag)
3119{
3120 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3121 unsigned int xid;
3122 int rc, access_flags = 0;
3123 struct cifs_tcon *tcon;
3124 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3125 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3126 struct cifs_fid fid;
3127 struct cifs_open_parms oparms;
3128 __le16 *utf16_path;
3129
3130 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
3131 if (IS_ERR(tlink))
3132 return PTR_ERR(tlink);
3133
3134 tcon = tlink_tcon(tlink);
3135 xid = get_xid();
3136
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003137 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
3138 access_flags = WRITE_OWNER;
3139 else
3140 access_flags = WRITE_DAC;
3141
3142 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05003143 if (!utf16_path) {
3144 rc = -ENOMEM;
3145 free_xid(xid);
3146 return rc;
3147 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003148
3149 oparms.tcon = tcon;
3150 oparms.desired_access = access_flags;
Amir Goldstein0f060932020-02-03 21:46:43 +02003151 oparms.create_options = cifs_create_options(cifs_sb, 0);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003152 oparms.disposition = FILE_OPEN;
3153 oparms.path = path;
3154 oparms.fid = &fid;
3155 oparms.reconnect = false;
3156
Aurelien Aptel69dda302020-03-02 17:53:22 +01003157 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
3158 NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003159 kfree(utf16_path);
3160 if (!rc) {
3161 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3162 fid.volatile_fid, pnntsd, acllen, aclflag);
3163 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3164 }
3165
3166 cifs_put_tlink(tlink);
3167 free_xid(xid);
3168 return rc;
3169}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003170
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003171/* Retrieve an ACL from the server */
3172static struct cifs_ntsd *
3173get_smb2_acl(struct cifs_sb_info *cifs_sb,
3174 struct inode *inode, const char *path,
3175 u32 *pacllen)
3176{
3177 struct cifs_ntsd *pntsd = NULL;
3178 struct cifsFileInfo *open_file = NULL;
3179
3180 if (inode)
3181 open_file = find_readable_file(CIFS_I(inode), true);
3182 if (!open_file)
3183 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
3184
3185 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
3186 cifsFileInfo_put(open_file);
3187 return pntsd;
3188}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003189
Steve French30175622014-08-17 18:16:40 -05003190static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
3191 loff_t offset, loff_t len, bool keep_size)
3192{
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003193 struct cifs_ses *ses = tcon->ses;
Steve French30175622014-08-17 18:16:40 -05003194 struct inode *inode;
3195 struct cifsInodeInfo *cifsi;
3196 struct cifsFileInfo *cfile = file->private_data;
3197 struct file_zero_data_information fsctl_buf;
3198 long rc;
3199 unsigned int xid;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003200 __le64 eof;
Steve French30175622014-08-17 18:16:40 -05003201
3202 xid = get_xid();
3203
David Howells2b0143b2015-03-17 22:25:59 +00003204 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05003205 cifsi = CIFS_I(inode);
3206
Christoph Probsta205d502019-05-08 21:36:25 +02003207 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
Steve French779ede02019-03-13 01:41:49 -05003208 ses->Suid, offset, len);
3209
Zhang Xiaoxu6b690402020-06-23 07:31:54 -04003210 /*
3211 * We zero the range through ioctl, so we need remove the page caches
3212 * first, otherwise the data may be inconsistent with the server.
3213 */
3214 truncate_pagecache_range(inode, offset, offset + len - 1);
Steve French779ede02019-03-13 01:41:49 -05003215
Steve French30175622014-08-17 18:16:40 -05003216 /* if file not oplocked can't be sure whether asking to extend size */
3217 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05003218 if (keep_size == false) {
3219 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05003220 trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
3221 tcon->tid, ses->Suid, offset, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05003222 free_xid(xid);
3223 return rc;
3224 }
Steve French30175622014-08-17 18:16:40 -05003225
Steve Frenchd1c35af2019-05-09 00:09:37 -05003226 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French30175622014-08-17 18:16:40 -05003227
3228 fsctl_buf.FileOffset = cpu_to_le64(offset);
3229 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3230
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10003231 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3232 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
3233 (char *)&fsctl_buf,
3234 sizeof(struct file_zero_data_information),
3235 0, NULL, NULL);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003236 if (rc)
3237 goto zero_range_exit;
3238
3239 /*
3240 * do we also need to change the size of the file?
3241 */
3242 if (keep_size == false && i_size_read(inode) < offset + len) {
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003243 eof = cpu_to_le64(offset + len);
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10003244 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3245 cfile->fid.volatile_fid, cfile->pid, &eof);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003246 }
3247
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10003248 zero_range_exit:
Steve French30175622014-08-17 18:16:40 -05003249 free_xid(xid);
Steve French779ede02019-03-13 01:41:49 -05003250 if (rc)
3251 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
3252 ses->Suid, offset, len, rc);
3253 else
3254 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
3255 ses->Suid, offset, len);
Steve French30175622014-08-17 18:16:40 -05003256 return rc;
3257}
3258
Steve French31742c52014-08-17 08:38:47 -05003259static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3260 loff_t offset, loff_t len)
3261{
3262 struct inode *inode;
Steve French31742c52014-08-17 08:38:47 -05003263 struct cifsFileInfo *cfile = file->private_data;
3264 struct file_zero_data_information fsctl_buf;
3265 long rc;
3266 unsigned int xid;
3267 __u8 set_sparse = 1;
3268
3269 xid = get_xid();
3270
David Howells2b0143b2015-03-17 22:25:59 +00003271 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05003272
3273 /* Need to make file sparse, if not already, before freeing range. */
3274 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05003275 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
3276 rc = -EOPNOTSUPP;
3277 free_xid(xid);
3278 return rc;
3279 }
Steve French31742c52014-08-17 08:38:47 -05003280
Zhang Xiaoxuacc91c22020-06-23 07:31:53 -04003281 /*
3282 * We implement the punch hole through ioctl, so we need remove the page
3283 * caches first, otherwise the data may be inconsistent with the server.
3284 */
3285 truncate_pagecache_range(inode, offset, offset + len - 1);
3286
Christoph Probsta205d502019-05-08 21:36:25 +02003287 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05003288
3289 fsctl_buf.FileOffset = cpu_to_le64(offset);
3290 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3291
3292 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3293 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01003294 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05003295 sizeof(struct file_zero_data_information),
3296 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05003297 free_xid(xid);
3298 return rc;
3299}
3300
Steve French9ccf3212014-10-18 17:01:15 -05003301static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
3302 loff_t off, loff_t len, bool keep_size)
3303{
3304 struct inode *inode;
3305 struct cifsInodeInfo *cifsi;
3306 struct cifsFileInfo *cfile = file->private_data;
3307 long rc = -EOPNOTSUPP;
3308 unsigned int xid;
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10003309 __le64 eof;
Steve French9ccf3212014-10-18 17:01:15 -05003310
3311 xid = get_xid();
3312
David Howells2b0143b2015-03-17 22:25:59 +00003313 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05003314 cifsi = CIFS_I(inode);
3315
Steve French779ede02019-03-13 01:41:49 -05003316 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3317 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003318 /* if file not oplocked can't be sure whether asking to extend size */
3319 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05003320 if (keep_size == false) {
Steve French779ede02019-03-13 01:41:49 -05003321 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3322 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05003323 free_xid(xid);
3324 return rc;
3325 }
Steve French9ccf3212014-10-18 17:01:15 -05003326
3327 /*
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +10003328 * Extending the file
3329 */
3330 if ((keep_size == false) && i_size_read(inode) < off + len) {
Murphy Zhouef4a6322020-03-18 20:43:38 +08003331 rc = inode_newsize_ok(inode, off + len);
3332 if (rc)
3333 goto out;
3334
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +10003335 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
3336 smb2_set_sparse(xid, tcon, cfile, inode, false);
3337
3338 eof = cpu_to_le64(off + len);
3339 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3340 cfile->fid.volatile_fid, cfile->pid, &eof);
3341 if (rc == 0) {
3342 cifsi->server_eof = off + len;
3343 cifs_setsize(inode, off + len);
3344 cifs_truncate_page(inode->i_mapping, inode->i_size);
3345 truncate_setsize(inode, off + len);
3346 }
3347 goto out;
3348 }
3349
3350 /*
Steve French9ccf3212014-10-18 17:01:15 -05003351 * Files are non-sparse by default so falloc may be a no-op
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +10003352 * Must check if file sparse. If not sparse, and since we are not
3353 * extending then no need to do anything since file already allocated
Steve French9ccf3212014-10-18 17:01:15 -05003354 */
3355 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +10003356 rc = 0;
3357 goto out;
Steve French9ccf3212014-10-18 17:01:15 -05003358 }
3359
3360 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
3361 /*
3362 * Check if falloc starts within first few pages of file
3363 * and ends within a few pages of the end of file to
3364 * ensure that most of file is being forced to be
3365 * fallocated now. If so then setting whole file sparse
3366 * ie potentially making a few extra pages at the beginning
3367 * or end of the file non-sparse via set_sparse is harmless.
3368 */
Steve Frenchcfe89092018-05-19 02:04:55 -05003369 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3370 rc = -EOPNOTSUPP;
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +10003371 goto out;
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10003372 }
Steve French9ccf3212014-10-18 17:01:15 -05003373 }
Steve French9ccf3212014-10-18 17:01:15 -05003374
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +10003375 smb2_set_sparse(xid, tcon, cfile, inode, false);
3376 rc = 0;
3377
3378out:
Steve French779ede02019-03-13 01:41:49 -05003379 if (rc)
3380 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3381 tcon->ses->Suid, off, len, rc);
3382 else
3383 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3384 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003385
3386 free_xid(xid);
3387 return rc;
3388}
3389
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10003390static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
3391{
3392 struct cifsFileInfo *wrcfile, *cfile = file->private_data;
3393 struct cifsInodeInfo *cifsi;
3394 struct inode *inode;
3395 int rc = 0;
3396 struct file_allocated_range_buffer in_data, *out_data = NULL;
3397 u32 out_data_len;
3398 unsigned int xid;
3399
3400 if (whence != SEEK_HOLE && whence != SEEK_DATA)
3401 return generic_file_llseek(file, offset, whence);
3402
3403 inode = d_inode(cfile->dentry);
3404 cifsi = CIFS_I(inode);
3405
3406 if (offset < 0 || offset >= i_size_read(inode))
3407 return -ENXIO;
3408
3409 xid = get_xid();
3410 /*
3411 * We need to be sure that all dirty pages are written as they
3412 * might fill holes on the server.
3413 * Note that we also MUST flush any written pages since at least
3414 * some servers (Windows2016) will not reflect recent writes in
3415 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
3416 */
Aurelien Aptel86f740f2020-02-21 11:19:06 +01003417 wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10003418 if (wrcfile) {
3419 filemap_write_and_wait(inode->i_mapping);
3420 smb2_flush_file(xid, tcon, &wrcfile->fid);
3421 cifsFileInfo_put(wrcfile);
3422 }
3423
3424 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
3425 if (whence == SEEK_HOLE)
3426 offset = i_size_read(inode);
3427 goto lseek_exit;
3428 }
3429
3430 in_data.file_offset = cpu_to_le64(offset);
3431 in_data.length = cpu_to_le64(i_size_read(inode));
3432
3433 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3434 cfile->fid.volatile_fid,
3435 FSCTL_QUERY_ALLOCATED_RANGES, true,
3436 (char *)&in_data, sizeof(in_data),
3437 sizeof(struct file_allocated_range_buffer),
3438 (char **)&out_data, &out_data_len);
3439 if (rc == -E2BIG)
3440 rc = 0;
3441 if (rc)
3442 goto lseek_exit;
3443
3444 if (whence == SEEK_HOLE && out_data_len == 0)
3445 goto lseek_exit;
3446
3447 if (whence == SEEK_DATA && out_data_len == 0) {
3448 rc = -ENXIO;
3449 goto lseek_exit;
3450 }
3451
3452 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3453 rc = -EINVAL;
3454 goto lseek_exit;
3455 }
3456 if (whence == SEEK_DATA) {
3457 offset = le64_to_cpu(out_data->file_offset);
3458 goto lseek_exit;
3459 }
3460 if (offset < le64_to_cpu(out_data->file_offset))
3461 goto lseek_exit;
3462
3463 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
3464
3465 lseek_exit:
3466 free_xid(xid);
3467 kfree(out_data);
3468 if (!rc)
3469 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3470 else
3471 return rc;
3472}
3473
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003474static int smb3_fiemap(struct cifs_tcon *tcon,
3475 struct cifsFileInfo *cfile,
3476 struct fiemap_extent_info *fei, u64 start, u64 len)
3477{
3478 unsigned int xid;
3479 struct file_allocated_range_buffer in_data, *out_data;
3480 u32 out_data_len;
3481 int i, num, rc, flags, last_blob;
3482 u64 next;
3483
Christoph Hellwig45dd0522020-05-23 09:30:14 +02003484 rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
Christoph Hellwigcddf8a22020-05-23 09:30:13 +02003485 if (rc)
3486 return rc;
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003487
3488 xid = get_xid();
3489 again:
3490 in_data.file_offset = cpu_to_le64(start);
3491 in_data.length = cpu_to_le64(len);
3492
3493 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3494 cfile->fid.volatile_fid,
3495 FSCTL_QUERY_ALLOCATED_RANGES, true,
3496 (char *)&in_data, sizeof(in_data),
3497 1024 * sizeof(struct file_allocated_range_buffer),
3498 (char **)&out_data, &out_data_len);
3499 if (rc == -E2BIG) {
3500 last_blob = 0;
3501 rc = 0;
3502 } else
3503 last_blob = 1;
3504 if (rc)
3505 goto out;
3506
Murphy Zhou979a2662020-03-14 11:38:31 +08003507 if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003508 rc = -EINVAL;
3509 goto out;
3510 }
3511 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3512 rc = -EINVAL;
3513 goto out;
3514 }
3515
3516 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3517 for (i = 0; i < num; i++) {
3518 flags = 0;
3519 if (i == num - 1 && last_blob)
3520 flags |= FIEMAP_EXTENT_LAST;
3521
3522 rc = fiemap_fill_next_extent(fei,
3523 le64_to_cpu(out_data[i].file_offset),
3524 le64_to_cpu(out_data[i].file_offset),
3525 le64_to_cpu(out_data[i].length),
3526 flags);
3527 if (rc < 0)
3528 goto out;
3529 if (rc == 1) {
3530 rc = 0;
3531 goto out;
3532 }
3533 }
3534
3535 if (!last_blob) {
3536 next = le64_to_cpu(out_data[num - 1].file_offset) +
3537 le64_to_cpu(out_data[num - 1].length);
3538 len = len - (next - start);
3539 start = next;
3540 goto again;
3541 }
3542
3543 out:
3544 free_xid(xid);
3545 kfree(out_data);
3546 return rc;
3547}
Steve French9ccf3212014-10-18 17:01:15 -05003548
Steve French31742c52014-08-17 08:38:47 -05003549static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3550 loff_t off, loff_t len)
3551{
3552 /* KEEP_SIZE already checked for by do_fallocate */
3553 if (mode & FALLOC_FL_PUNCH_HOLE)
3554 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003555 else if (mode & FALLOC_FL_ZERO_RANGE) {
3556 if (mode & FALLOC_FL_KEEP_SIZE)
3557 return smb3_zero_range(file, tcon, off, len, true);
3558 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003559 } else if (mode == FALLOC_FL_KEEP_SIZE)
3560 return smb3_simple_falloc(file, tcon, off, len, true);
3561 else if (mode == 0)
3562 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003563
3564 return -EOPNOTSUPP;
3565}
3566
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003567static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003568smb2_downgrade_oplock(struct TCP_Server_Info *server,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07003569 struct cifsInodeInfo *cinode, __u32 oplock,
3570 unsigned int epoch, bool *purge_cache)
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003571{
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07003572 server->ops->set_oplock_level(cinode, oplock, 0, NULL);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003573}
3574
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache);

/*
 * Downgrade a lease on a break notification (SMB 3 variant).  The new
 * level is applied only if the server's epoch is newer than ours.  The
 * page cache must be purged when read caching is lost, or when the state
 * is unchanged but more than one epoch was skipped (changes were missed).
 */
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
		       struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache)
{
	unsigned int old_state = cinode->oplock;
	unsigned int old_epoch = cinode->epoch;
	unsigned int new_state;

	if (epoch > old_epoch) {
		smb21_set_oplock_level(cinode, oplock, 0, NULL);
		cinode->epoch = epoch;
	}

	new_state = cinode->oplock;
	*purge_cache = false;

	/* lost read caching, or missed intermediate epochs: cache is stale */
	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
	    (new_state & CIFS_CACHE_READ_FLG) == 0)
		*purge_cache = true;
	else if (old_state == new_state && (epoch - old_epoch > 1))
		*purge_cache = true;
}
3602
3603static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003604smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3605 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003606{
3607 oplock &= 0xFF;
3608 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3609 return;
3610 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003611 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003612 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3613 &cinode->vfs_inode);
3614 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003615 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003616 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3617 &cinode->vfs_inode);
3618 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3619 cinode->oplock = CIFS_CACHE_READ_FLG;
3620 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3621 &cinode->vfs_inode);
3622 } else
3623 cinode->oplock = 0;
3624}
3625
/*
 * Translate an SMB2.1+ lease state (low byte of @oplock) into the local
 * caching flags on @cinode, logging the granted R/H/W combination.
 * If the server answered with a plain oplock instead of a lease, defer
 * to the SMB 2.0 handler.
 */
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache)
{
	/* room for "RHW"/"None" plus NUL in the debug string */
	char message[5] = {0};
	unsigned int new_oplock = 0;

	oplock &= 0xFF;
	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
		return;

	/* Check if the server granted an oplock rather than a lease */
	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		return smb2_set_oplock_level(cinode, oplock, epoch,
					     purge_cache);

	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
		new_oplock |= CIFS_CACHE_READ_FLG;
		strcat(message, "R");
	}
	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_HANDLE_FLG;
		strcat(message, "H");
	}
	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_WRITE_FLG;
		strcat(message, "W");
	}
	if (!new_oplock)
		strncpy(message, "None", sizeof(message));

	cinode->oplock = new_oplock;
	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
		 &cinode->vfs_inode);
}
3661
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003662static void
3663smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3664 unsigned int epoch, bool *purge_cache)
3665{
3666 unsigned int old_oplock = cinode->oplock;
3667
3668 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
3669
3670 if (purge_cache) {
3671 *purge_cache = false;
3672 if (old_oplock == CIFS_CACHE_READ_FLG) {
3673 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
3674 (epoch - cinode->epoch > 0))
3675 *purge_cache = true;
3676 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3677 (epoch - cinode->epoch > 1))
3678 *purge_cache = true;
3679 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3680 (epoch - cinode->epoch > 1))
3681 *purge_cache = true;
3682 else if (cinode->oplock == 0 &&
3683 (epoch - cinode->epoch > 0))
3684 *purge_cache = true;
3685 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
3686 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3687 (epoch - cinode->epoch > 0))
3688 *purge_cache = true;
3689 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3690 (epoch - cinode->epoch > 1))
3691 *purge_cache = true;
3692 }
3693 cinode->epoch = epoch;
3694 }
3695}
3696
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003697static bool
3698smb2_is_read_op(__u32 oplock)
3699{
3700 return oplock == SMB2_OPLOCK_LEVEL_II;
3701}
3702
3703static bool
3704smb21_is_read_op(__u32 oplock)
3705{
3706 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3707 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3708}
3709
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003710static __le32
3711map_oplock_to_lease(u8 oplock)
3712{
3713 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3714 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3715 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3716 return SMB2_LEASE_READ_CACHING;
3717 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3718 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3719 SMB2_LEASE_WRITE_CACHING;
3720 return 0;
3721}
3722
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003723static char *
3724smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3725{
3726 struct create_lease *buf;
3727
3728 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3729 if (!buf)
3730 return NULL;
3731
Stefano Brivio729c0c92018-07-05 15:10:02 +02003732 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003733 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003734
3735 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3736 (struct create_lease, lcontext));
3737 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3738 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3739 (struct create_lease, Name));
3740 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003741 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003742 buf->Name[0] = 'R';
3743 buf->Name[1] = 'q';
3744 buf->Name[2] = 'L';
3745 buf->Name[3] = 's';
3746 return (char *)buf;
3747}
3748
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003749static char *
3750smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3751{
3752 struct create_lease_v2 *buf;
3753
3754 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3755 if (!buf)
3756 return NULL;
3757
Stefano Brivio729c0c92018-07-05 15:10:02 +02003758 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003759 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3760
3761 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3762 (struct create_lease_v2, lcontext));
3763 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3764 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3765 (struct create_lease_v2, Name));
3766 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003767 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003768 buf->Name[0] = 'R';
3769 buf->Name[1] = 'q';
3770 buf->Name[2] = 'L';
3771 buf->Name[3] = 's';
3772 return (char *)buf;
3773}
3774
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003775static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003776smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003777{
3778 struct create_lease *lc = (struct create_lease *)buf;
3779
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003780 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003781 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3782 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3783 return le32_to_cpu(lc->lcontext.LeaseState);
3784}
3785
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003786static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003787smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003788{
3789 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3790
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003791 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003792 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3793 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003794 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003795 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003796 return le32_to_cpu(lc->lcontext.LeaseState);
3797}
3798
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003799static unsigned int
3800smb2_wp_retry_size(struct inode *inode)
3801{
3802 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3803 SMB2_MAX_BUFFER_SIZE);
3804}
3805
Pavel Shilovsky52755802014-08-18 20:49:57 +04003806static bool
3807smb2_dir_needs_close(struct cifsFileInfo *cfile)
3808{
3809 return !cfile->invalidHandle;
3810}
3811
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003812static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003813fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05003814 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003815{
3816 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003817 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003818
3819 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3820 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3821 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3822 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French63ca5652020-10-15 23:41:40 -05003823 if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
3824 (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
Steve Frenchfd08f2d2020-10-15 00:25:02 -05003825 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
Steve French2b2f7542019-06-07 15:16:10 -05003826 else
Steve Frenchfd08f2d2020-10-15 00:25:02 -05003827 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003828 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003829}
3830
/*
 * We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf: with VMAP_STACK (at least) the stack lives in
 * the vmalloc address space, so the page must be looked up with
 * vmalloc_to_page() instead of virt_to_page().
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	struct page *pg;

	pg = is_vmalloc_addr(buf) ? vmalloc_to_page(buf) : virt_to_page(buf);
	sg_set_page(sg, pg, buflen, offset_in_page(buf));
}
3847
/*
 * Build a scatterlist describing the data covered by AEAD
 * encryption/decryption for a compound request.
 *
 * Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0]   is transform header
 * rqst[0].rq_iov[1+]  data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * The last scatterlist entry points at @sign, where the auth tag
 * (signature) is read from (decrypt) or written to (encrypt).
 * Returns NULL on allocation failure; caller frees with kfree().
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and per page of every rqst, plus the signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* the auth tag / signature buffer goes last */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
3895
/*
 * Look up the SMB3 encryption (@enc != 0) or decryption key for the
 * session whose Suid matches @ses_id and copy it into @key
 * (SMB3_SIGN_KEY_SIZE bytes).
 *
 * NOTE(review): the @server parameter is immediately reused as the list
 * iterator, so ALL TCP connections are searched, not just the one passed
 * in — presumably deliberate for multichannel, where a channel may not
 * carry its own session entry; confirm against callers.
 *
 * Returns 0 on success, 1 if no matching session was found.
 */
static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{
	struct cifs_ses *ses;
	u8 *ses_enc_key;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			if (ses->Suid == ses_id) {
				ses_enc_key = enc ? ses->smb3encryptionkey :
					ses->smb3decryptionkey;
				memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
				spin_unlock(&cifs_tcp_ses_lock);
				return 0;
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	return 1;
}
/*
 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
 * iov[0] - transform header (associate data),
 * iov[1-N] - SMB2 header and pages - data to encrypt.
 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
 * untouched.
 */
static int
crypt_message(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int enc)
{
	struct smb2_transform_hdr *tr_hdr =
		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
	/* first 20 bytes of the transform header are not authenticated */
	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
	int rc = 0;
	struct scatterlist *sg;
	u8 sign[SMB2_SIGNATURE_SIZE] = {};
	u8 key[SMB3_SIGN_KEY_SIZE];
	struct aead_request *req;
	char *iv;
	unsigned int iv_len;
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);

	rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
			 enc ? "en" : "de");
		/*
		 * NOTE(review): returns 0 (success) even though no key was
		 * found — looks like an error should propagate here; confirm
		 * whether callers rely on this silent-continue behavior.
		 */
		return 0;
	}

	/* lazily allocate the AEAD tfms for this connection if needed */
	rc = smb3_crypto_aead_allocate(server);
	if (rc) {
		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
		return rc;
	}

	tfm = enc ? server->secmech.ccmaesencrypt :
						server->secmech.ccmaesdecrypt;

	/* AES-256-GCM uses a larger key than the other negotiated ciphers */
	if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
	else
		rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);

	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
		return rc;
	}

	/* the 16-byte signature field doubles as the AEAD auth tag */
	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
		return rc;
	}

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
		return -ENOMEM;
	}

	if (!enc) {
		/* on decrypt, the received signature is the tag to verify */
		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
		crypt_len += SMB2_SIGNATURE_SIZE;
	}

	sg = init_sg(num_rqst, rqst, sign);
	if (!sg) {
		cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
		rc = -ENOMEM;
		goto free_req;
	}

	iv_len = crypto_aead_ivsize(tfm);
	iv = kzalloc(iv_len, GFP_KERNEL);
	if (!iv) {
		cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
		rc = -ENOMEM;
		goto free_sg;
	}

	/* GCM: nonce is the IV; CCM: flag byte 3 followed by the nonce */
	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
	    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
	else {
		iv[0] = 3;
		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
	}

	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
	aead_request_set_ad(req, assoc_data_len);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* synchronous wait, even if the cipher completes asynchronously */
	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
				: crypto_aead_decrypt(req), &wait);

	/* on encrypt, publish the generated tag as the message signature */
	if (!rc && enc)
		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);

	kfree(iv);
free_sg:
	kfree(sg);
free_req:
	kfree(req);
	return rc;
}
4028
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004029void
4030smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004031{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004032 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004033
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004034 for (i = 0; i < num_rqst; i++) {
4035 if (rqst[i].rq_pages) {
4036 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
4037 put_page(rqst[i].rq_pages[j]);
4038 kfree(rqst[i].rq_pages);
4039 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004040 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004041}
4042
/*
 * This function will initialize new_rq and encrypt the content.
 * The first entry, new_rq[0], only contains a single iov which contains
 * a smb2_transform_hdr and is pre-allocated by the caller.
 * This function then populates new_rq[1+] with the content from old_rq[0+].
 *
 * The end result is an array of smb_rqst structures where the first structure
 * only contains a single iov for the transform header which we then can pass
 * to crypt_message().
 *
 * new_rq[0].rq_iov[0] :  smb2_transform_hdr pre-allocated by the caller
 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
 *
 * On failure, any pages already allocated into new_rq[1+] are released
 * via smb3_free_compound_rqst(); returns 0 on success, -ENOMEM or a
 * crypt_message() error otherwise.
 */
static int
smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
	struct page **pages;
	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
	unsigned int npages;
	unsigned int orig_len = 0;
	int i, j;
	int rc = -ENOMEM;

	for (i = 1; i < num_rqst; i++) {
		/* mirror the old request's layout, but with fresh pages */
		npages = old_rq[i - 1].rq_npages;
		pages = kmalloc_array(npages, sizeof(struct page *),
				      GFP_KERNEL);
		if (!pages)
			goto err_free;

		new_rq[i].rq_pages = pages;
		new_rq[i].rq_npages = npages;
		new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
		new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
		new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
		new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
		new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;

		orig_len += smb_rqst_len(server, &old_rq[i - 1]);

		for (j = 0; j < npages; j++) {
			pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
			if (!pages[j])
				goto err_free;
		}

		/* copy page data from the old request (encrypt in place) */
		for (j = 0; j < npages; j++) {
			char *dst, *src;
			unsigned int offset, len;

			rqst_page_get_length(&new_rq[i], j, &len, &offset);

			dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
			src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;

			memcpy(dst, src, len);
			kunmap(new_rq[i].rq_pages[j]);
			kunmap(old_rq[i - 1].rq_pages[j]);
		}
	}

	/* fill the 1st iov with a transform header */
	fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);

	rc = crypt_message(server, num_rqst, new_rq, 1);
	cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
	if (rc)
		goto err_free;

	return rc;

err_free:
	smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
	return rc;
}
4120
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004121static int
4122smb3_is_transform_hdr(void *buf)
4123{
4124 struct smb2_transform_hdr *trhdr = buf;
4125
4126 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4127}
4128
/*
 * Decrypt an encrypted response in place.  @buf holds the transform
 * header followed by @buf_data_size bytes of encrypted SMB2 data;
 * any additional payload lives in @pages (@page_data_size bytes total).
 * On success the decrypted SMB2 data is moved to the start of @buf
 * (overwriting the transform header) and server->total_read is updated.
 */
static int
decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
		 unsigned int buf_data_size, struct page **pages,
		 unsigned int npages, unsigned int page_data_size)
{
	struct kvec iov[2];
	struct smb_rqst rqst = {NULL};
	int rc;

	/* iov[0]: transform header (associated data), iov[1]: ciphertext */
	iov[0].iov_base = buf;
	iov[0].iov_len = sizeof(struct smb2_transform_hdr);
	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
	iov[1].iov_len = buf_data_size;

	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;
	rqst.rq_pages = pages;
	rqst.rq_npages = npages;
	rqst.rq_pagesz = PAGE_SIZE;
	rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;

	rc = crypt_message(server, 1, &rqst, 0);
	cifs_dbg(FYI, "Decrypt message returned %d\n", rc);

	if (rc)
		return rc;

	/* shift plaintext over the transform header */
	memmove(buf, iov[1].iov_base, buf_data_size);

	server->total_read = buf_data_size + page_data_size;

	return rc;
}
4162
4163static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004164read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
4165 unsigned int npages, unsigned int len)
4166{
4167 int i;
4168 int length;
4169
4170 for (i = 0; i < npages; i++) {
4171 struct page *page = pages[i];
4172 size_t n;
4173
4174 n = len;
4175 if (len >= PAGE_SIZE) {
4176 /* enough data to fill the page */
4177 n = PAGE_SIZE;
4178 len -= n;
4179 } else {
4180 zero_user(page, len, PAGE_SIZE - len);
4181 len = 0;
4182 }
Long Li1dbe3462018-05-30 12:47:55 -07004183 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004184 if (length < 0)
4185 return length;
4186 server->total_read += length;
4187 }
4188
4189 return 0;
4190}
4191
4192static int
4193init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
4194 unsigned int cur_off, struct bio_vec **page_vec)
4195{
4196 struct bio_vec *bvec;
4197 int i;
4198
4199 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
4200 if (!bvec)
4201 return -ENOMEM;
4202
4203 for (i = 0; i < npages; i++) {
4204 bvec[i].bv_page = pages[i];
4205 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
4206 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
4207 data_size -= bvec[i].bv_len;
4208 }
4209
4210 if (data_size != 0) {
4211 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4212 kfree(bvec);
4213 return -EIO;
4214 }
4215
4216 *page_vec = bvec;
4217 return 0;
4218}
4219
4220static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004221handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
4222 char *buf, unsigned int buf_len, struct page **pages,
4223 unsigned int npages, unsigned int page_data_size)
4224{
4225 unsigned int data_offset;
4226 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004227 unsigned int cur_off;
4228 unsigned int cur_page_idx;
4229 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004230 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10004231 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004232 struct bio_vec *bvec = NULL;
4233 struct iov_iter iter;
4234 struct kvec iov;
4235 int length;
Long Li74dcf412017-11-22 17:38:46 -07004236 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004237
4238 if (shdr->Command != SMB2_READ) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004239 cifs_server_dbg(VFS, "only big read responses are supported\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004240 return -ENOTSUPP;
4241 }
4242
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004243 if (server->ops->is_session_expired &&
4244 server->ops->is_session_expired(buf)) {
4245 cifs_reconnect(server);
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004246 return -1;
4247 }
4248
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004249 if (server->ops->is_status_pending &&
Pavel Shilovsky66265f12019-01-23 17:11:16 -08004250 server->ops->is_status_pending(buf, server))
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004251 return -1;
4252
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08004253 /* set up first two iov to get credits */
4254 rdata->iov[0].iov_base = buf;
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08004255 rdata->iov[0].iov_len = 0;
4256 rdata->iov[1].iov_base = buf;
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08004257 rdata->iov[1].iov_len =
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08004258 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08004259 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
4260 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
4261 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
4262 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
4263
4264 rdata->result = server->ops->map_error(buf, true);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004265 if (rdata->result != 0) {
4266 cifs_dbg(FYI, "%s: server returned error %d\n",
4267 __func__, rdata->result);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08004268 /* normal error on read response */
4269 dequeue_mid(mid, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004270 return 0;
4271 }
4272
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004273 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07004274#ifdef CONFIG_CIFS_SMB_DIRECT
4275 use_rdma_mr = rdata->mr;
4276#endif
4277 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004278
4279 if (data_offset < server->vals->read_rsp_size) {
4280 /*
4281 * win2k8 sometimes sends an offset of 0 when the read
4282 * is beyond the EOF. Treat it as if the data starts just after
4283 * the header.
4284 */
4285 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
4286 __func__, data_offset);
4287 data_offset = server->vals->read_rsp_size;
4288 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
4289 /* data_offset is beyond the end of smallbuf */
4290 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
4291 __func__, data_offset);
4292 rdata->result = -EIO;
4293 dequeue_mid(mid, rdata->result);
4294 return 0;
4295 }
4296
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004297 pad_len = data_offset - server->vals->read_rsp_size;
4298
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004299 if (buf_len <= data_offset) {
4300 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004301 cur_page_idx = pad_len / PAGE_SIZE;
4302 cur_off = pad_len % PAGE_SIZE;
4303
4304 if (cur_page_idx != 0) {
4305 /* data offset is beyond the 1st page of response */
4306 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
4307 __func__, data_offset);
4308 rdata->result = -EIO;
4309 dequeue_mid(mid, rdata->result);
4310 return 0;
4311 }
4312
4313 if (data_len > page_data_size - pad_len) {
4314 /* data_len is corrupt -- discard frame */
4315 rdata->result = -EIO;
4316 dequeue_mid(mid, rdata->result);
4317 return 0;
4318 }
4319
4320 rdata->result = init_read_bvec(pages, npages, page_data_size,
4321 cur_off, &bvec);
4322 if (rdata->result != 0) {
4323 dequeue_mid(mid, rdata->result);
4324 return 0;
4325 }
4326
David Howellsaa563d72018-10-20 00:57:56 +01004327 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004328 } else if (buf_len >= data_offset + data_len) {
4329 /* read response payload is in buf */
4330 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
4331 iov.iov_base = buf + data_offset;
4332 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01004333 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004334 } else {
4335 /* read response payload cannot be in both buf and pages */
4336 WARN_ONCE(1, "buf can not contain only a part of read data");
4337 rdata->result = -EIO;
4338 dequeue_mid(mid, rdata->result);
4339 return 0;
4340 }
4341
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004342 length = rdata->copy_into_pages(server, rdata, &iter);
4343
4344 kfree(bvec);
4345
4346 if (length < 0)
4347 return length;
4348
4349 dequeue_mid(mid, false);
4350 return length;
4351}
4352
/*
 * Work item used to offload decryption of a large encrypted read
 * response to a worker thread (see smb2_decrypt_offload()).  The
 * worker owns buf, ppages and the struct itself and frees them.
 */
struct smb2_decrypt_work {
	struct work_struct decrypt;
	struct TCP_Server_Info *server;
	struct page **ppages;	/* pages holding the encrypted payload */
	char *buf;		/* transform header + read response header */
	unsigned int npages;
	unsigned int len;	/* length of the paged payload */
};
4361
4362
/*
 * Worker: decrypt a large read response off the demultiplex thread,
 * then locate its mid and complete it.  Frees the pages, the buffer
 * and the smb2_decrypt_work allocated by receive_encrypted_read().
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	/* record server response time, as the demultiplex thread would */
	dw->server->lstrp = jiffies;
	mid = smb2_find_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len);
		/* complete the request and drop our mid reference */
		mid->callback(mid);
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4398
4399
/*
 * Receive and decrypt an encrypted large read response.  The response
 * header is read into server->smallbuf and the payload into freshly
 * allocated pages.  Large responses may be handed to a worker thread
 * for decryption (*num_mids = 0, worker completes the mid); otherwise
 * decryption happens inline and *mid is looked up here (*num_mids = 1).
 */
static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
		       int *num_mids)
{
	char *buf = server->smallbuf;
	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
	unsigned int npages;
	struct page **pages;
	unsigned int len;
	unsigned int buflen = server->pdu_size;
	int rc;
	int i = 0;
	struct smb2_decrypt_work *dw;

	*num_mids = 1;
	/* finish reading the transform header + read response header */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;

	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
	if (rc < 0)
		return rc;
	server->total_read += rc;

	/* remaining bytes are the read payload, received into pages */
	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
		server->vals->read_rsp_size;
	npages = DIV_ROUND_UP(len, PAGE_SIZE);

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto discard_data;
	}

	for (; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			rc = -ENOMEM;
			goto discard_data;
		}
	}

	/* read read data into pages */
	rc = read_data_into_pages(server, pages, npages, len);
	if (rc)
		goto free_pages;

	rc = cifs_discard_remaining_data(server);
	if (rc)
		goto free_pages;

	/*
	 * For large reads, offload to different thread for better performance,
	 * use more cores decrypting which can be expensive
	 */

	if ((server->min_offload) && (server->in_flight > 1) &&
	    (server->pdu_size >= server->min_offload)) {
		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
		if (dw == NULL)
			goto non_offloaded_decrypt;

		/* worker takes ownership of buf and pages; replace smallbuf */
		dw->buf = server->smallbuf;
		server->smallbuf = (char *)cifs_small_buf_get();

		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);

		dw->npages = npages;
		dw->server = server;
		dw->ppages = pages;
		dw->len = len;
		queue_work(decrypt_wq, &dw->decrypt);
		*num_mids = 0; /* worker thread takes care of finding mid */
		return -1;
	}

non_offloaded_decrypt:
	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
			      pages, npages, len);
	if (rc)
		goto free_pages;

	*mid = smb2_find_mid(server, buf);
	if (*mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		(*mid)->decrypted = true;
		rc = handle_read_data(server, *mid, buf,
				      server->vals->read_rsp_size,
				      pages, npages, len);
	}

free_pages:
	/* i indexes one past the last successfully allocated page */
	for (i = i - 1; i >= 0; i--)
		put_page(pages[i]);
	kfree(pages);
	return rc;
discard_data:
	cifs_discard_remaining_data(server);
	goto free_pages;
}
4501
4502static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004503receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004504 struct mid_q_entry **mids, char **bufs,
4505 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004506{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004507 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004508 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004509 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004510 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004511 unsigned int buf_size;
4512 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004513 int next_is_large;
4514 char *next_buffer = NULL;
4515
4516 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004517
4518 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004519 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004520 server->large_buf = true;
4521 memcpy(server->bigbuf, buf, server->total_read);
4522 buf = server->bigbuf;
4523 }
4524
4525 /* now read the rest */
4526 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004527 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004528 if (length < 0)
4529 return length;
4530 server->total_read += length;
4531
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004532 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004533 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
4534 if (length)
4535 return length;
4536
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004537 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004538one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004539 shdr = (struct smb2_sync_hdr *)buf;
4540 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004541 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004542 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004543 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004544 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004545 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004546 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004547 pdu_length - le32_to_cpu(shdr->NextCommand));
4548 }
4549
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004550 mid_entry = smb2_find_mid(server, buf);
4551 if (mid_entry == NULL)
4552 cifs_dbg(FYI, "mid not found\n");
4553 else {
4554 cifs_dbg(FYI, "mid found\n");
4555 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004556 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004557 }
4558
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004559 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004560 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004561 return -1;
4562 }
4563 bufs[*num_mids] = buf;
4564 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004565
4566 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004567 ret = mid_entry->handle(server, mid_entry);
4568 else
4569 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004570
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004571 if (ret == 0 && shdr->NextCommand) {
4572 pdu_length -= le32_to_cpu(shdr->NextCommand);
4573 server->large_buf = next_is_large;
4574 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004575 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004576 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004577 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004578 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004579 } else if (ret != 0) {
4580 /*
4581 * ret != 0 here means that we didn't get to handle_mid() thus
4582 * server->smallbuf and server->bigbuf are still valid. We need
4583 * to free next_buffer because it is not going to be used
4584 * anywhere.
4585 */
4586 if (next_is_large)
4587 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4588 else
4589 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004590 }
4591
4592 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004593}
4594
4595static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004596smb3_receive_transform(struct TCP_Server_Info *server,
4597 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004598{
4599 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004600 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004601 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4602 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4603
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004604 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004605 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004606 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004607 pdu_length);
4608 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004609 return -ECONNABORTED;
4610 }
4611
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004612 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004613 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004614 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004615 return -ECONNABORTED;
4616 }
4617
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004618 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004619 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004620 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004621 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004622
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004623 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004624}
4625
4626int
4627smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4628{
4629 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4630
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004631 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004632 NULL, 0, 0);
4633}
4634
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004635static int
4636smb2_next_header(char *buf)
4637{
4638 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4639 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4640
4641 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4642 return sizeof(struct smb2_transform_hdr) +
4643 le32_to_cpu(t_hdr->OriginalMessageSize);
4644
4645 return le32_to_cpu(hdr->NextCommand);
4646}
4647
/*
 * Create a special file (character or block device node) over SMB2/3
 * using the SFU ("Services for Unix") emulation scheme: the node is
 * stored as a regular file whose first bytes hold an "IntxCHR"/"IntxBLK"
 * tag and the little-endian major/minor numbers (struct win_dev).
 *
 * Returns 0 on success; -EPERM when the mount lacks 'sfu' or the mode
 * is not a char/block device (sockets and fifos are not supported here);
 * -ENOMEM or the server op's error otherwise.
 */
static int
smb2_make_node(unsigned int xid, struct inode *inode,
	       struct dentry *dentry, struct cifs_tcon *tcon,
	       char *full_path, umode_t mode, dev_t dev)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	int rc = -EPERM;
	FILE_ALL_INFO *buf = NULL;
	struct cifs_io_parms io_parms = {0};
	__u32 oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	unsigned int bytes_written;
	struct win_dev *pdev;
	struct kvec iov[2];

	/*
	 * Check if mounted with mount parm 'sfu' mount parm.
	 * SFU emulation should work with all servers, but only
	 * supports block and char device (no socket & fifo),
	 * and was used by default in earlier versions of Windows
	 */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
		goto out;

	/*
	 * TODO: Add ability to create instead via reparse point. Windows (e.g.
	 * their current NFS server) uses this approach to expose special files
	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
	 */

	if (!S_ISCHR(mode) && !S_ISBLK(mode))
		goto out;

	cifs_dbg(FYI, "sfu compat create special file\n");

	/* buf doubles as the open-response info and the win_dev payload */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
						    CREATE_OPTION_SPECIAL);
	oparms.disposition = FILE_CREATE;
	oparms.path = full_path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/* request an oplock if the server grants them; optional */
	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;
	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
	if (rc)
		goto out;

	/*
	 * BB Do not bother to decode buf since no local inode yet to put
	 * timestamps in, but we can reuse it safely.
	 */

	pdev = (struct win_dev *)buf;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = sizeof(struct win_dev);
	/* iov[0] is reserved for the write op's header; payload in iov[1] */
	iov[1].iov_base = buf;
	iov[1].iov_len = sizeof(struct win_dev);
	if (S_ISCHR(mode)) {
		memcpy(pdev->type, "IntxCHR", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	} else if (S_ISBLK(mode)) {
		memcpy(pdev->type, "IntxBLK", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	}
	tcon->ses->server->ops->close(xid, tcon, &fid);
	/* drop the dentry so the next lookup revalidates against the server */
	d_drop(dentry);

	/* FIXME: add code here to set EAs */
out:
	kfree(buf);
	return rc;
}
4741
4742
/*
 * Protocol dispatch table for the SMB 2.0 dialect.  Note this dialect
 * uses cifs_wait_mtu_credits (no per-op credit accounting) and the
 * SMB2.0 lease/oplock helpers; later dialect tables below override
 * these with the 2.1/3.x variants.
 */
struct smb_version_operations smb20_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = cifs_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb2_is_read_op,
	.set_oplock_level = smb2_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
};
4840
/*
 * Protocol dispatch table for the SMB 2.1 dialect.  Differs from
 * smb20_operations mainly in credit handling (smb2_wait_mtu_credits,
 * adjust_credits), the 2.1 read-op/oplock-level helpers, and support
 * for snapshot enumeration and change notification.
 */
struct smb_version_operations smb21_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb21_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
};
4941
/*
 * Protocol dispatch table for the SMB 3.0 dialect.  Adds on top of 2.1:
 * SMB3 signing (generate_smb30signingkey / smb3_calc_signature), v2
 * lease buffers, encryption support (init_transform_rq,
 * is_transform_hdr, receive_transform), duplicate-extents/fallocate,
 * secure-negotiate validation, and close-with-attributes.
 */
struct smb_version_operations smb30_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb30signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
};
5052
/*
 * Dispatch table of protocol operations for the SMB 3.1.1 dialect.
 *
 * Most entries are shared with the SMB2/SMB3 dialects; the 3.1.1-specific
 * ones visible here are .posix_mkdir (smb311_posix_mkdir), .queryfs
 * (smb311_queryfs) and .generate_signingkey (generate_smb311signingkey).
 * Lease create contexts use the v3 helpers (smb3_create_lease_buf /
 * smb3_parse_lease_buf) and encryption is handled via the smb3 transform
 * hooks at the bottom of the table.
 */
struct smb_version_operations smb311_operations = {
	/* transport / credit accounting */
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	/* negotiate / session / tree connect */
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	/* path and file operations */
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.posix_mkdir = smb311_posix_mkdir,	/* 3.1.1 POSIX extensions */
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb311_queryfs,	/* 3.1.1-specific fs info query */
	/* byte-range locking */
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	/* leasing and signing */
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb311signingkey,	/* pre-auth integrity based key */
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	/* SMB3 encryption (transform header) support */
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
};
Steve Frenchaab18932015-06-23 23:37:11 -05005164
/*
 * Protocol constants for the original SMB 2.0.2 dialect.
 * Negotiate capabilities must be zero (MBZ) for this dialect, and lease
 * create contexts use the v1 layout (struct create_lease).
 */
struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,	/* no RFC1001-style length preamble counted here */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* NOTE(review): -1 presumably drops a 1-byte data placeholder at the
	 * end of smb2_read_rsp — confirm against the struct definition */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,	/* no Unix/POSIX extension capability for SMB2 dialects */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
5185
/*
 * Protocol constants for the SMB 2.1 dialect.  Like 2.0.2, negotiate
 * capabilities stay zero (only SMB3 dialects advertise them) and leases
 * use the v1 create context.
 */
struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
5206
/*
 * Protocol constants used when the mount requests "any SMB3 dialect"
 * (vers=3).  The client sends an array of SMB3 dialects on negotiate, so
 * .protocol_id here is just a placeholder.  Capability mask and v2 lease
 * context match the individual SMB3 dialect tables below.
 */
struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
5227
/*
 * Protocol constants for the default mount (no explicit vers= option).
 * As with smb3any_values, an array of dialects is sent on negotiate, so
 * .protocol_id is a placeholder; the rest mirrors the SMB3 dialect tables.
 */
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
5248
/*
 * Protocol constants for the SMB 3.0 dialect.  First dialect to advertise
 * negotiate capabilities (DFS, leasing, large MTU, persistent handles,
 * encryption, directory leasing) and to use the v2 lease create context.
 */
struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French20b6d8b2013-06-12 22:48:41 -05005269
/*
 * Protocol constants for the SMB 3.0.2 dialect.  Identical to
 * smb30_values except for the version string and protocol id.
 */
struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French5f7fbf72014-12-17 22:52:58 -06005290
/*
 * Protocol constants for the SMB 3.1.1 dialect.  Same capability mask and
 * v2 lease context as the other SMB3 dialects; 3.1.1-specific behavior
 * (pre-auth integrity, negotiate contexts) lives in smb311_operations and
 * the negotiate code rather than in this table.
 */
struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};