blob: 1654d50b8aef344ff68a9d4b82f73b9db8002be0 [file] [log] [blame]
Steve French1080ef72011-02-24 18:07:19 +00001/*
2 * SMB2 version specific operations
3 *
4 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
5 *
6 * This library is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License v2 as published
8 * by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public License
16 * along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -070020#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070021#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050022#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070023#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020024#include <linux/uuid.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070025#include <crypto/aead.h>
Steve French1080ef72011-02-24 18:07:19 +000026#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040027#include "smb2pdu.h"
28#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040029#include "cifsproto.h"
30#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040031#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070032#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070033#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050034#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070035#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040036
Pavel Shilovskyef68e832019-01-18 17:25:36 -080037/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040038static int
39change_conf(struct TCP_Server_Info *server)
40{
41 server->credits += server->echo_credits + server->oplock_credits;
42 server->oplock_credits = server->echo_credits = 0;
43 switch (server->credits) {
44 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080045 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040046 case 1:
47 server->echoes = false;
48 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040049 break;
50 case 2:
51 server->echoes = true;
52 server->oplocks = false;
53 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040054 break;
55 default:
56 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050057 if (enable_oplocks) {
58 server->oplocks = true;
59 server->oplock_credits = 1;
60 } else
61 server->oplocks = false;
62
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040063 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040064 }
65 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080066 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040067}
68
69static void
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080070smb2_add_credits(struct TCP_Server_Info *server,
71 const struct cifs_credits *credits, const int optype)
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040072{
Pavel Shilovskyef68e832019-01-18 17:25:36 -080073 int *val, rc = -1;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080074 unsigned int add = credits->value;
75 unsigned int instance = credits->instance;
76 bool reconnect_detected = false;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080077
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040078 spin_lock(&server->req_lock);
79 val = server->ops->get_credits_field(server, optype);
Steve Frenchb340a4d2018-09-01 01:10:17 -050080
81 /* eg found case where write overlapping reconnect messed up credits */
82 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
83 trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
84 server->hostname, *val);
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080085 if ((instance == 0) || (instance == server->reconnect_instance))
86 *val += add;
87 else
88 reconnect_detected = true;
Steve Frenchb340a4d2018-09-01 01:10:17 -050089
Steve French141891f2016-09-23 00:44:16 -050090 if (*val > 65000) {
91 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
92 printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
93 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040094 server->in_flight--;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +040095 if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040096 rc = change_conf(server);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -070097 /*
98 * Sometimes server returns 0 credits on oplock break ack - we need to
99 * rebalance credits in this case.
100 */
101 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
102 server->oplocks) {
103 if (server->credits > 1) {
104 server->credits--;
105 server->oplock_credits++;
106 }
107 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400108 spin_unlock(&server->req_lock);
109 wake_up(&server->request_q);
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800110
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800111 if (reconnect_detected)
112 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
113 add, instance);
114
Pavel Shilovsky82e04572019-01-25 10:56:41 -0800115 if (server->tcpStatus == CifsNeedReconnect
116 || server->tcpStatus == CifsExiting)
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800117 return;
118
119 switch (rc) {
120 case -1:
121 /* change_conf hasn't been executed */
122 break;
123 case 0:
124 cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
125 break;
126 case 1:
127 cifs_dbg(VFS, "disabling echoes and oplocks\n");
128 break;
129 case 2:
130 cifs_dbg(FYI, "disabling oplocks\n");
131 break;
132 default:
133 cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
134 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400135}
136
137static void
138smb2_set_credits(struct TCP_Server_Info *server, const int val)
139{
140 spin_lock(&server->req_lock);
141 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500142 if (val == 1)
143 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400144 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500145 /* don't log while holding the lock */
146 if (val == 1)
147 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400148}
149
150static int *
151smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
152{
153 switch (optype) {
154 case CIFS_ECHO_OP:
155 return &server->echo_credits;
156 case CIFS_OBREAK_OP:
157 return &server->oplock_credits;
158 default:
159 return &server->credits;
160 }
161}
162
163static unsigned int
164smb2_get_credits(struct mid_q_entry *mid)
165{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000166 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700167
Pavel Shilovsky3d3003f2019-01-22 16:50:21 -0800168 if (mid->mid_state == MID_RESPONSE_RECEIVED
169 || mid->mid_state == MID_RESPONSE_MALFORMED)
170 return le16_to_cpu(shdr->CreditRequest);
171
172 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400173}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400174
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400175static int
176smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800177 unsigned int *num, struct cifs_credits *credits)
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400178{
179 int rc = 0;
180 unsigned int scredits;
181
182 spin_lock(&server->req_lock);
183 while (1) {
184 if (server->credits <= 0) {
185 spin_unlock(&server->req_lock);
186 cifs_num_waiters_inc(server);
187 rc = wait_event_killable(server->request_q,
Ronnie Sahlbergb227d212019-03-08 12:58:20 +1000188 has_credits(server, &server->credits, 1));
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400189 cifs_num_waiters_dec(server);
190 if (rc)
191 return rc;
192 spin_lock(&server->req_lock);
193 } else {
194 if (server->tcpStatus == CifsExiting) {
195 spin_unlock(&server->req_lock);
196 return -ENOENT;
197 }
198
199 scredits = server->credits;
200 /* can deadlock with reopen */
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800201 if (scredits <= 8) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400202 *num = SMB2_MAX_BUFFER_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800203 credits->value = 0;
204 credits->instance = 0;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400205 break;
206 }
207
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800208 /* leave some credits for reopen and other ops */
209 scredits -= 8;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400210 *num = min_t(unsigned int, size,
211 scredits * SMB2_MAX_BUFFER_SIZE);
212
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800213 credits->value =
214 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
215 credits->instance = server->reconnect_instance;
216 server->credits -= credits->value;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400217 server->in_flight++;
218 break;
219 }
220 }
221 spin_unlock(&server->req_lock);
222 return rc;
223}
224
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800225static int
226smb2_adjust_credits(struct TCP_Server_Info *server,
227 struct cifs_credits *credits,
228 const unsigned int payload_size)
229{
230 int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
231
232 if (!credits->value || credits->value == new_val)
233 return 0;
234
235 if (credits->value < new_val) {
236 WARN_ONCE(1, "request has less credits (%d) than required (%d)",
237 credits->value, new_val);
238 return -ENOTSUPP;
239 }
240
241 spin_lock(&server->req_lock);
242
243 if (server->reconnect_instance != credits->instance) {
244 spin_unlock(&server->req_lock);
245 cifs_dbg(VFS, "trying to return %d credits to old session\n",
246 credits->value - new_val);
247 return -EAGAIN;
248 }
249
250 server->credits += credits->value - new_val;
251 spin_unlock(&server->req_lock);
252 wake_up(&server->request_q);
253 credits->value = new_val;
254 return 0;
255}
256
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400257static __u64
258smb2_get_next_mid(struct TCP_Server_Info *server)
259{
260 __u64 mid;
261 /* for SMB2 we need the current value */
262 spin_lock(&GlobalMid_Lock);
263 mid = server->CurrentMid++;
264 spin_unlock(&GlobalMid_Lock);
265 return mid;
266}
Steve French1080ef72011-02-24 18:07:19 +0000267
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800268static void
269smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
270{
271 spin_lock(&GlobalMid_Lock);
272 if (server->CurrentMid >= val)
273 server->CurrentMid -= val;
274 spin_unlock(&GlobalMid_Lock);
275}
276
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400277static struct mid_q_entry *
278smb2_find_mid(struct TCP_Server_Info *server, char *buf)
279{
280 struct mid_q_entry *mid;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000281 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700282 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400283
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700284 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
Steve French373512e2015-12-18 13:05:30 -0600285 cifs_dbg(VFS, "encrypted frame parsing not supported yet");
286 return NULL;
287 }
288
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400289 spin_lock(&GlobalMid_Lock);
290 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
Sachin Prabhu9235d092014-12-09 17:37:00 +0000291 if ((mid->mid == wire_mid) &&
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400292 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700293 (mid->command == shdr->Command)) {
Lars Persson696e4202018-06-25 14:05:25 +0200294 kref_get(&mid->refcount);
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400295 spin_unlock(&GlobalMid_Lock);
296 return mid;
297 }
298 }
299 spin_unlock(&GlobalMid_Lock);
300 return NULL;
301}
302
303static void
Ronnie Sahlberg14547f72018-04-22 14:45:53 -0600304smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400305{
306#ifdef CONFIG_CIFS_DEBUG2
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000307 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400308
Joe Perchesf96637b2013-05-04 22:12:25 -0500309 cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700310 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
311 shdr->ProcessId);
Ronnie Sahlberg14547f72018-04-22 14:45:53 -0600312 cifs_dbg(VFS, "smb buf %p len %u\n", buf,
Steve French71992e622018-05-06 15:58:51 -0500313 server->ops->calc_smb_size(buf, server));
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400314#endif
315}
316
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400317static bool
318smb2_need_neg(struct TCP_Server_Info *server)
319{
320 return server->max_read == 0;
321}
322
323static int
324smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
325{
326 int rc;
327 ses->server->CurrentMid = 0;
328 rc = SMB2_negotiate(xid, ses);
329 /* BB we probably don't need to retry with modern servers */
330 if (rc == -EAGAIN)
331 rc = -EHOSTDOWN;
332 return rc;
333}
334
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700335static unsigned int
336smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
337{
338 struct TCP_Server_Info *server = tcon->ses->server;
339 unsigned int wsize;
340
341 /* start with specified wsize, or default */
342 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
343 wsize = min_t(unsigned int, wsize, server->max_write);
Long Li09902f82017-11-22 17:38:39 -0700344#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700345 if (server->rdma) {
346 if (server->sign)
347 wsize = min_t(unsigned int,
348 wsize, server->smbd_conn->max_fragmented_send_size);
349 else
350 wsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700351 wsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700352 }
Long Li09902f82017-11-22 17:38:39 -0700353#endif
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400354 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
355 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700356
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700357 return wsize;
358}
359
360static unsigned int
Steve French3d621232018-09-25 15:33:47 -0500361smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
362{
363 struct TCP_Server_Info *server = tcon->ses->server;
364 unsigned int wsize;
365
366 /* start with specified wsize, or default */
367 wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
368 wsize = min_t(unsigned int, wsize, server->max_write);
369#ifdef CONFIG_CIFS_SMB_DIRECT
370 if (server->rdma) {
371 if (server->sign)
372 wsize = min_t(unsigned int,
373 wsize, server->smbd_conn->max_fragmented_send_size);
374 else
375 wsize = min_t(unsigned int,
376 wsize, server->smbd_conn->max_readwrite_size);
377 }
378#endif
379 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
380 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
381
382 return wsize;
383}
384
385static unsigned int
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700386smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
387{
388 struct TCP_Server_Info *server = tcon->ses->server;
389 unsigned int rsize;
390
391 /* start with specified rsize, or default */
392 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
393 rsize = min_t(unsigned int, rsize, server->max_read);
Long Li09902f82017-11-22 17:38:39 -0700394#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700395 if (server->rdma) {
396 if (server->sign)
397 rsize = min_t(unsigned int,
398 rsize, server->smbd_conn->max_fragmented_recv_size);
399 else
400 rsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700401 rsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700402 }
Long Li09902f82017-11-22 17:38:39 -0700403#endif
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400404
405 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
406 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700407
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700408 return rsize;
409}
410
Steve French3d621232018-09-25 15:33:47 -0500411static unsigned int
412smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
413{
414 struct TCP_Server_Info *server = tcon->ses->server;
415 unsigned int rsize;
416
417 /* start with specified rsize, or default */
418 rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
419 rsize = min_t(unsigned int, rsize, server->max_read);
420#ifdef CONFIG_CIFS_SMB_DIRECT
421 if (server->rdma) {
422 if (server->sign)
423 rsize = min_t(unsigned int,
424 rsize, server->smbd_conn->max_fragmented_recv_size);
425 else
426 rsize = min_t(unsigned int,
427 rsize, server->smbd_conn->max_readwrite_size);
428 }
429#endif
430
431 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
432 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
433
434 return rsize;
435}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200436
437static int
438parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
439 size_t buf_len,
440 struct cifs_server_iface **iface_list,
441 size_t *iface_count)
442{
443 struct network_interface_info_ioctl_rsp *p;
444 struct sockaddr_in *addr4;
445 struct sockaddr_in6 *addr6;
446 struct iface_info_ipv4 *p4;
447 struct iface_info_ipv6 *p6;
448 struct cifs_server_iface *info;
449 ssize_t bytes_left;
450 size_t next = 0;
451 int nb_iface = 0;
452 int rc = 0;
453
454 *iface_list = NULL;
455 *iface_count = 0;
456
457 /*
458 * Fist pass: count and sanity check
459 */
460
461 bytes_left = buf_len;
462 p = buf;
463 while (bytes_left >= sizeof(*p)) {
464 nb_iface++;
465 next = le32_to_cpu(p->Next);
466 if (!next) {
467 bytes_left -= sizeof(*p);
468 break;
469 }
470 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
471 bytes_left -= next;
472 }
473
474 if (!nb_iface) {
475 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
476 rc = -EINVAL;
477 goto out;
478 }
479
480 if (bytes_left || p->Next)
481 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
482
483
484 /*
485 * Second pass: extract info to internal structure
486 */
487
488 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
489 if (!*iface_list) {
490 rc = -ENOMEM;
491 goto out;
492 }
493
494 info = *iface_list;
495 bytes_left = buf_len;
496 p = buf;
497 while (bytes_left >= sizeof(*p)) {
498 info->speed = le64_to_cpu(p->LinkSpeed);
499 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
500 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
501
502 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
503 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
504 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
505 le32_to_cpu(p->Capability));
506
507 switch (p->Family) {
508 /*
509 * The kernel and wire socket structures have the same
510 * layout and use network byte order but make the
511 * conversion explicit in case either one changes.
512 */
513 case INTERNETWORK:
514 addr4 = (struct sockaddr_in *)&info->sockaddr;
515 p4 = (struct iface_info_ipv4 *)p->Buffer;
516 addr4->sin_family = AF_INET;
517 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
518
519 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
520 addr4->sin_port = cpu_to_be16(CIFS_PORT);
521
522 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
523 &addr4->sin_addr);
524 break;
525 case INTERNETWORKV6:
526 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
527 p6 = (struct iface_info_ipv6 *)p->Buffer;
528 addr6->sin6_family = AF_INET6;
529 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
530
531 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
532 addr6->sin6_flowinfo = 0;
533 addr6->sin6_scope_id = 0;
534 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
535
536 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
537 &addr6->sin6_addr);
538 break;
539 default:
540 cifs_dbg(VFS,
541 "%s: skipping unsupported socket family\n",
542 __func__);
543 goto next_iface;
544 }
545
546 (*iface_count)++;
547 info++;
548next_iface:
549 next = le32_to_cpu(p->Next);
550 if (!next)
551 break;
552 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
553 bytes_left -= next;
554 }
555
556 if (!*iface_count) {
557 rc = -EINVAL;
558 goto out;
559 }
560
561out:
562 if (rc) {
563 kfree(*iface_list);
564 *iface_count = 0;
565 *iface_list = NULL;
566 }
567 return rc;
568}
569
570
Steve Frenchc481e9f2013-10-14 01:21:53 -0500571static int
572SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
573{
574 int rc;
575 unsigned int ret_data_len = 0;
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200576 struct network_interface_info_ioctl_rsp *out_buf = NULL;
577 struct cifs_server_iface *iface_list;
578 size_t iface_count;
579 struct cifs_ses *ses = tcon->ses;
Steve Frenchc481e9f2013-10-14 01:21:53 -0500580
581 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
582 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
583 NULL /* no data input */, 0 /* no data input */,
584 (char **)&out_buf, &ret_data_len);
Steve Frenchc3ed4402018-06-28 22:53:39 -0500585 if (rc == -EOPNOTSUPP) {
586 cifs_dbg(FYI,
587 "server does not support query network interfaces\n");
588 goto out;
589 } else if (rc != 0) {
Steve French9ffc5412014-10-16 15:13:14 -0500590 cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200591 goto out;
Steve French9ffc5412014-10-16 15:13:14 -0500592 }
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200593
594 rc = parse_server_interfaces(out_buf, ret_data_len,
595 &iface_list, &iface_count);
596 if (rc)
597 goto out;
598
599 spin_lock(&ses->iface_lock);
600 kfree(ses->iface_list);
601 ses->iface_list = iface_list;
602 ses->iface_count = iface_count;
603 ses->iface_last_update = jiffies;
604 spin_unlock(&ses->iface_lock);
605
606out:
Steve French24df1482016-09-29 04:20:23 -0500607 kfree(out_buf);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500608 return rc;
609}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500610
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000611static void
612smb2_close_cached_fid(struct kref *ref)
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000613{
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000614 struct cached_fid *cfid = container_of(ref, struct cached_fid,
615 refcount);
616
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000617 if (cfid->is_valid) {
618 cifs_dbg(FYI, "clear cached root file handle\n");
619 SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
620 cfid->fid->volatile_fid);
621 cfid->is_valid = false;
622 }
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000623}
624
625void close_shroot(struct cached_fid *cfid)
626{
627 mutex_lock(&cfid->fid_mutex);
628 kref_put(&cfid->refcount, smb2_close_cached_fid);
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000629 mutex_unlock(&cfid->fid_mutex);
630}
631
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000632void
633smb2_cached_lease_break(struct work_struct *work)
634{
635 struct cached_fid *cfid = container_of(work,
636 struct cached_fid, lease_break);
637
638 close_shroot(cfid);
639}
640
Steve French3d4ef9a2018-04-25 22:19:09 -0500641/*
642 * Open the directory at the root of a share
643 */
644int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
645{
646 struct cifs_open_parms oparams;
647 int rc;
648 __le16 srch_path = 0; /* Null - since an open of top of share */
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000649 u8 oplock = SMB2_OPLOCK_LEVEL_II;
Steve French3d4ef9a2018-04-25 22:19:09 -0500650
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000651 mutex_lock(&tcon->crfid.fid_mutex);
652 if (tcon->crfid.is_valid) {
Steve French3d4ef9a2018-04-25 22:19:09 -0500653 cifs_dbg(FYI, "found a cached root file handle\n");
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000654 memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000655 kref_get(&tcon->crfid.refcount);
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000656 mutex_unlock(&tcon->crfid.fid_mutex);
Steve French3d4ef9a2018-04-25 22:19:09 -0500657 return 0;
658 }
659
660 oparams.tcon = tcon;
661 oparams.create_options = 0;
662 oparams.desired_access = FILE_READ_ATTRIBUTES;
663 oparams.disposition = FILE_OPEN;
664 oparams.fid = pfid;
665 oparams.reconnect = false;
666
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000667 rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
Steve French3d4ef9a2018-04-25 22:19:09 -0500668 if (rc == 0) {
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000669 memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
670 tcon->crfid.tcon = tcon;
671 tcon->crfid.is_valid = true;
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000672 kref_init(&tcon->crfid.refcount);
673 kref_get(&tcon->crfid.refcount);
Steve French3d4ef9a2018-04-25 22:19:09 -0500674 }
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000675 mutex_unlock(&tcon->crfid.fid_mutex);
Steve French3d4ef9a2018-04-25 22:19:09 -0500676 return rc;
677}
678
Steve French34f62642013-10-09 02:07:00 -0500679static void
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500680smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
681{
682 int rc;
683 __le16 srch_path = 0; /* Null - open root of share */
684 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
685 struct cifs_open_parms oparms;
686 struct cifs_fid fid;
Steve French3d4ef9a2018-04-25 22:19:09 -0500687 bool no_cached_open = tcon->nohandlecache;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500688
689 oparms.tcon = tcon;
690 oparms.desired_access = FILE_READ_ATTRIBUTES;
691 oparms.disposition = FILE_OPEN;
692 oparms.create_options = 0;
693 oparms.fid = &fid;
694 oparms.reconnect = false;
695
Steve French3d4ef9a2018-04-25 22:19:09 -0500696 if (no_cached_open)
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000697 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
698 NULL);
Steve French3d4ef9a2018-04-25 22:19:09 -0500699 else
700 rc = open_shroot(xid, tcon, &fid);
701
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500702 if (rc)
703 return;
704
Steve Frenchc481e9f2013-10-14 01:21:53 -0500705 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500706
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500707 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
708 FS_ATTRIBUTE_INFORMATION);
709 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
710 FS_DEVICE_INFORMATION);
711 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500712 FS_VOLUME_INFORMATION);
713 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500714 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Steve French3d4ef9a2018-04-25 22:19:09 -0500715 if (no_cached_open)
716 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000717 else
718 close_shroot(&tcon->crfid);
719
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500720 return;
721}
722
723static void
Steve French34f62642013-10-09 02:07:00 -0500724smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
725{
726 int rc;
727 __le16 srch_path = 0; /* Null - open root of share */
728 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
729 struct cifs_open_parms oparms;
730 struct cifs_fid fid;
731
732 oparms.tcon = tcon;
733 oparms.desired_access = FILE_READ_ATTRIBUTES;
734 oparms.disposition = FILE_OPEN;
735 oparms.create_options = 0;
736 oparms.fid = &fid;
737 oparms.reconnect = false;
738
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000739 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500740 if (rc)
741 return;
742
Steven French21671142013-10-09 13:36:35 -0500743 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
744 FS_ATTRIBUTE_INFORMATION);
745 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
746 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500747 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
748 return;
749}
750
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400751static int
752smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
753 struct cifs_sb_info *cifs_sb, const char *full_path)
754{
755 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400756 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700757 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400758 struct cifs_open_parms oparms;
759 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400760
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000761 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -0500762 return 0;
763
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400764 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
765 if (!utf16_path)
766 return -ENOMEM;
767
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400768 oparms.tcon = tcon;
769 oparms.desired_access = FILE_READ_ATTRIBUTES;
770 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -0500771 if (backup_cred(cifs_sb))
772 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
773 else
774 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400775 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400776 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400777
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000778 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400779 if (rc) {
780 kfree(utf16_path);
781 return rc;
782 }
783
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400784 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400785 kfree(utf16_path);
786 return rc;
787}
788
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +0400789static int
790smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
791 struct cifs_sb_info *cifs_sb, const char *full_path,
792 u64 *uniqueid, FILE_ALL_INFO *data)
793{
794 *uniqueid = le64_to_cpu(data->IndexNumber);
795 return 0;
796}
797
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700798static int
799smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
800 struct cifs_fid *fid, FILE_ALL_INFO *data)
801{
802 int rc;
803 struct smb2_file_all_info *smb2_data;
804
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +0400805 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700806 GFP_KERNEL);
807 if (smb2_data == NULL)
808 return -ENOMEM;
809
810 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
811 smb2_data);
812 if (!rc)
813 move_smb2_info_to_cifs(data, smb2_data);
814 kfree(smb2_data);
815 return rc;
816}
817
Arnd Bergmann1368f152017-09-05 11:24:15 +0200818#ifdef CONFIG_CIFS_XATTR
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000819static ssize_t
820move_smb2_ea_to_cifs(char *dst, size_t dst_size,
821 struct smb2_file_full_ea_info *src, size_t src_size,
822 const unsigned char *ea_name)
823{
824 int rc = 0;
825 unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
826 char *name, *value;
Ronnie Sahlberg0c5d6cb2018-10-25 15:43:36 +1000827 size_t buf_size = dst_size;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000828 size_t name_len, value_len, user_name_len;
829
830 while (src_size > 0) {
831 name = &src->ea_data[0];
832 name_len = (size_t)src->ea_name_length;
833 value = &src->ea_data[src->ea_name_length + 1];
834 value_len = (size_t)le16_to_cpu(src->ea_value_length);
835
836 if (name_len == 0) {
837 break;
838 }
839
840 if (src_size < 8 + name_len + 1 + value_len) {
841 cifs_dbg(FYI, "EA entry goes beyond length of list\n");
842 rc = -EIO;
843 goto out;
844 }
845
846 if (ea_name) {
847 if (ea_name_len == name_len &&
848 memcmp(ea_name, name, name_len) == 0) {
849 rc = value_len;
850 if (dst_size == 0)
851 goto out;
852 if (dst_size < value_len) {
853 rc = -ERANGE;
854 goto out;
855 }
856 memcpy(dst, value, value_len);
857 goto out;
858 }
859 } else {
860 /* 'user.' plus a terminating null */
861 user_name_len = 5 + 1 + name_len;
862
Ronnie Sahlberg0c5d6cb2018-10-25 15:43:36 +1000863 if (buf_size == 0) {
864 /* skip copy - calc size only */
865 rc += user_name_len;
866 } else if (dst_size >= user_name_len) {
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000867 dst_size -= user_name_len;
868 memcpy(dst, "user.", 5);
869 dst += 5;
870 memcpy(dst, src->ea_data, name_len);
871 dst += name_len;
872 *dst = 0;
873 ++dst;
Ronnie Sahlberg0c5d6cb2018-10-25 15:43:36 +1000874 rc += user_name_len;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000875 } else {
876 /* stop before overrun buffer */
877 rc = -ERANGE;
878 break;
879 }
880 }
881
882 if (!src->next_entry_offset)
883 break;
884
885 if (src_size < le32_to_cpu(src->next_entry_offset)) {
886 /* stop before overrun buffer */
887 rc = -ERANGE;
888 break;
889 }
890 src_size -= le32_to_cpu(src->next_entry_offset);
891 src = (void *)((char *)src +
892 le32_to_cpu(src->next_entry_offset));
893 }
894
895 /* didn't find the named attribute */
896 if (ea_name)
897 rc = -ENODATA;
898
899out:
900 return (ssize_t)rc;
901}
902
903static ssize_t
904smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
905 const unsigned char *path, const unsigned char *ea_name,
906 char *ea_data, size_t buf_size,
907 struct cifs_sb_info *cifs_sb)
908{
909 int rc;
910 __le16 *utf16_path;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +1000911 struct kvec rsp_iov = {NULL, 0};
912 int buftype = CIFS_NO_BUFFER;
913 struct smb2_query_info_rsp *rsp;
914 struct smb2_file_full_ea_info *info = NULL;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000915
916 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
917 if (!utf16_path)
918 return -ENOMEM;
919
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +1000920 rc = smb2_query_info_compound(xid, tcon, utf16_path,
921 FILE_READ_EA,
922 FILE_FULL_EA_INFORMATION,
923 SMB2_O_INFO_FILE,
Ronnie Sahlbergc4627e62019-01-29 12:46:17 +1000924 CIFSMaxBufSize -
925 MAX_SMB2_CREATE_RESPONSE_SIZE -
926 MAX_SMB2_CLOSE_RESPONSE_SIZE,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +1000927 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000928 if (rc) {
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +1000929 /*
930 * If ea_name is NULL (listxattr) and there are no EAs,
931 * return 0 as it's not an error. Otherwise, the specified
932 * ea_name was not found.
933 */
934 if (!ea_name && rc == -ENODATA)
935 rc = 0;
936 goto qeas_exit;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000937 }
938
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +1000939 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
940 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
941 le32_to_cpu(rsp->OutputBufferLength),
942 &rsp_iov,
943 sizeof(struct smb2_file_full_ea_info));
944 if (rc)
945 goto qeas_exit;
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +1000946
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +1000947 info = (struct smb2_file_full_ea_info *)(
948 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
949 rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
950 le32_to_cpu(rsp->OutputBufferLength), ea_name);
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +1000951
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +1000952 qeas_exit:
953 kfree(utf16_path);
954 free_rsp_buf(buftype, rsp_iov.iov_base);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000955 return rc;
956}
957
Ronnie Sahlberg55175542017-08-24 11:24:56 +1000958
959static int
960smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
961 const char *path, const char *ea_name, const void *ea_value,
962 const __u16 ea_value_len, const struct nls_table *nls_codepage,
963 struct cifs_sb_info *cifs_sb)
964{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +1000965 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +1000966 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +1000967 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +1000968 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +1000969 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +1000970 struct smb_rqst rqst[3];
971 int resp_buftype[3];
972 struct kvec rsp_iov[3];
973 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
974 struct cifs_open_parms oparms;
975 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
976 struct cifs_fid fid;
977 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
978 unsigned int size[1];
979 void *data[1];
980 struct smb2_file_full_ea_info *ea = NULL;
981 struct kvec close_iov[1];
982 int rc;
983
984 if (smb3_encryption_required(tcon))
985 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +1000986
987 if (ea_name_len > 255)
988 return -EINVAL;
989
990 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
991 if (!utf16_path)
992 return -ENOMEM;
993
Ronnie Sahlberg0967e542018-11-06 22:52:43 +1000994 memset(rqst, 0, sizeof(rqst));
995 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
996 memset(rsp_iov, 0, sizeof(rsp_iov));
997
Ronnie Sahlberg21094642019-02-07 15:48:44 +1000998 if (ses->server->ops->query_all_EAs) {
999 if (!ea_value) {
1000 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1001 ea_name, NULL, 0,
1002 cifs_sb);
1003 if (rc == -ENODATA)
1004 goto sea_exit;
1005 }
1006 }
1007
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001008 /* Open */
1009 memset(&open_iov, 0, sizeof(open_iov));
1010 rqst[0].rq_iov = open_iov;
1011 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1012
1013 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001014 oparms.tcon = tcon;
1015 oparms.desired_access = FILE_WRITE_EA;
1016 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001017 if (backup_cred(cifs_sb))
1018 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1019 else
1020 oparms.create_options = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001021 oparms.fid = &fid;
1022 oparms.reconnect = false;
1023
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001024 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
1025 if (rc)
1026 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001027 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001028
1029
1030 /* Set Info */
1031 memset(&si_iov, 0, sizeof(si_iov));
1032 rqst[1].rq_iov = si_iov;
1033 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001034
1035 len = sizeof(ea) + ea_name_len + ea_value_len + 1;
1036 ea = kzalloc(len, GFP_KERNEL);
1037 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001038 rc = -ENOMEM;
1039 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001040 }
1041
1042 ea->ea_name_length = ea_name_len;
1043 ea->ea_value_length = cpu_to_le16(ea_value_len);
1044 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1045 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1046
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001047 size[0] = len;
1048 data[0] = ea;
1049
1050 rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
1051 COMPOUND_FID, current->tgid,
1052 FILE_FULL_EA_INFORMATION,
1053 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001054 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001055 smb2_set_related(&rqst[1]);
1056
1057
1058 /* Close */
1059 memset(&close_iov, 0, sizeof(close_iov));
1060 rqst[2].rq_iov = close_iov;
1061 rqst[2].rq_nvec = 1;
1062 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
1063 smb2_set_related(&rqst[2]);
1064
1065 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1066 resp_buftype, rsp_iov);
1067
1068 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001069 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001070 kfree(utf16_path);
1071 SMB2_open_free(&rqst[0]);
1072 SMB2_set_info_free(&rqst[1]);
1073 SMB2_close_free(&rqst[2]);
1074 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1075 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1076 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001077 return rc;
1078}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001079#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001080
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001081static bool
1082smb2_can_echo(struct TCP_Server_Info *server)
1083{
1084 return server->echoes;
1085}
1086
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001087static void
1088smb2_clear_stats(struct cifs_tcon *tcon)
1089{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001090 int i;
1091 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1092 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1093 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1094 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001095}
1096
1097static void
Steve French769ee6a2013-06-19 14:15:30 -05001098smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1099{
1100 seq_puts(m, "\n\tShare Capabilities:");
1101 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1102 seq_puts(m, " DFS,");
1103 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1104 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1105 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1106 seq_puts(m, " SCALEOUT,");
1107 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1108 seq_puts(m, " CLUSTER,");
1109 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1110 seq_puts(m, " ASYMMETRIC,");
1111 if (tcon->capabilities == 0)
1112 seq_puts(m, " None");
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001113 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1114 seq_puts(m, " Aligned,");
1115 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1116 seq_puts(m, " Partition Aligned,");
1117 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1118 seq_puts(m, " SSD,");
1119 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1120 seq_puts(m, " TRIM-support,");
1121
Steve French769ee6a2013-06-19 14:15:30 -05001122 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
Steve Frenche0386e42018-05-20 01:27:03 -05001123 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001124 if (tcon->perf_sector_size)
1125 seq_printf(m, "\tOptimal sector size: 0x%x",
1126 tcon->perf_sector_size);
Steve Frenche0386e42018-05-20 01:27:03 -05001127 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
Steve French769ee6a2013-06-19 14:15:30 -05001128}
1129
1130static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001131smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1132{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001133 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1134 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001135
1136 /*
1137 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1138 * totals (requests sent) since those SMBs are per-session not per tcon
1139 */
Steve French52ce1ac2018-07-31 01:46:47 -05001140 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1141 (long long)(tcon->bytes_read),
1142 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001143 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1144 atomic_read(&tcon->num_local_opens),
1145 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001146 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001147 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1148 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001149 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001150 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1151 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001152 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001153 atomic_read(&sent[SMB2_CREATE_HE]),
1154 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001155 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001156 atomic_read(&sent[SMB2_CLOSE_HE]),
1157 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001158 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001159 atomic_read(&sent[SMB2_FLUSH_HE]),
1160 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001161 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001162 atomic_read(&sent[SMB2_READ_HE]),
1163 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001164 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001165 atomic_read(&sent[SMB2_WRITE_HE]),
1166 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001167 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001168 atomic_read(&sent[SMB2_LOCK_HE]),
1169 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001170 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001171 atomic_read(&sent[SMB2_IOCTL_HE]),
1172 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001173 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001174 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1175 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001176 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001177 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1178 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001179 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001180 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1181 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001182 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001183 atomic_read(&sent[SMB2_SET_INFO_HE]),
1184 atomic_read(&failed[SMB2_SET_INFO_HE]));
1185 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1186 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1187 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001188}
1189
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001190static void
1191smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1192{
David Howells2b0143b2015-03-17 22:25:59 +00001193 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04001194 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1195
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001196 cfile->fid.persistent_fid = fid->persistent_fid;
1197 cfile->fid.volatile_fid = fid->volatile_fid;
Steve Frenchdfe33f92018-10-30 19:50:31 -05001198#ifdef CONFIG_CIFS_DEBUG2
1199 cfile->fid.mid = fid->mid;
1200#endif /* CIFS_DEBUG2 */
Pavel Shilovsky42873b02013-09-05 21:30:16 +04001201 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1202 &fid->purge_cache);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001203 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
Aurelien Aptel94f87372016-09-22 07:38:50 +02001204 memcpy(cfile->fid.create_guid, fid->create_guid, 16);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001205}
1206
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001207static void
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001208smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1209 struct cifs_fid *fid)
1210{
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001211 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001212}
1213
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001214static int
Steve French41c13582013-11-14 00:05:36 -06001215SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1216 u64 persistent_fid, u64 volatile_fid,
1217 struct copychunk_ioctl *pcchunk)
1218{
1219 int rc;
1220 unsigned int ret_data_len;
1221 struct resume_key_req *res_key;
1222
1223 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1224 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
1225 NULL, 0 /* no input */,
1226 (char **)&res_key, &ret_data_len);
1227
1228 if (rc) {
1229 cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
1230 goto req_res_key_exit;
1231 }
1232 if (ret_data_len < sizeof(struct resume_key_req)) {
1233 cifs_dbg(VFS, "Invalid refcopy resume key length\n");
1234 rc = -EINVAL;
1235 goto req_res_key_exit;
1236 }
1237 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1238
1239req_res_key_exit:
1240 kfree(res_key);
1241 return rc;
1242}
1243
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001244static int
1245smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001246 struct cifs_tcon *tcon,
1247 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001248 unsigned long p)
1249{
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001250 struct cifs_ses *ses = tcon->ses;
1251 char __user *arg = (char __user *)p;
1252 struct smb_query_info qi;
1253 struct smb_query_info __user *pqi;
1254 int rc = 0;
1255 int flags = 0;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001256 struct smb2_query_info_rsp *rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001257 void *buffer = NULL;
1258 struct smb_rqst rqst[3];
1259 int resp_buftype[3];
1260 struct kvec rsp_iov[3];
1261 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1262 struct cifs_open_parms oparms;
1263 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1264 struct cifs_fid fid;
1265 struct kvec qi_iov[1];
1266 struct kvec close_iov[1];
1267
1268 memset(rqst, 0, sizeof(rqst));
1269 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1270 memset(rsp_iov, 0, sizeof(rsp_iov));
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001271
1272 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
1273 return -EFAULT;
1274
1275 if (qi.output_buffer_length > 1024)
1276 return -EINVAL;
1277
1278 if (!ses || !(ses->server))
1279 return -EIO;
1280
1281 if (smb3_encryption_required(tcon))
1282 flags |= CIFS_TRANSFORM_REQ;
1283
1284 buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
1285 if (buffer == NULL)
1286 return -ENOMEM;
1287
1288 if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
1289 qi.output_buffer_length)) {
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001290 rc = -EFAULT;
1291 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001292 }
1293
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001294 /* Open */
1295 memset(&open_iov, 0, sizeof(open_iov));
1296 rqst[0].rq_iov = open_iov;
1297 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001298
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001299 memset(&oparms, 0, sizeof(oparms));
1300 oparms.tcon = tcon;
1301 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
1302 oparms.disposition = FILE_OPEN;
1303 if (is_dir)
1304 oparms.create_options = CREATE_NOT_FILE;
1305 else
1306 oparms.create_options = CREATE_NOT_DIR;
1307 oparms.fid = &fid;
1308 oparms.reconnect = false;
1309
1310 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
1311 if (rc)
1312 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001313 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001314
1315 /* Query */
1316 memset(&qi_iov, 0, sizeof(qi_iov));
1317 rqst[1].rq_iov = qi_iov;
1318 rqst[1].rq_nvec = 1;
1319
1320 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001321 qi.file_info_class, qi.info_type,
1322 qi.additional_information,
1323 qi.input_buffer_length,
1324 qi.output_buffer_length, buffer);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001325 if (rc)
1326 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001327 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001328 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001329
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001330 /* Close */
1331 memset(&close_iov, 0, sizeof(close_iov));
1332 rqst[2].rq_iov = close_iov;
1333 rqst[2].rq_nvec = 1;
1334
1335 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001336 if (rc)
1337 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001338 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001339
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001340 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1341 resp_buftype, rsp_iov);
1342 if (rc)
1343 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001344 pqi = (struct smb_query_info __user *)arg;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001345 rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001346 if (le32_to_cpu(rsp->OutputBufferLength) < qi.input_buffer_length)
1347 qi.input_buffer_length = le32_to_cpu(rsp->OutputBufferLength);
1348 if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
1349 sizeof(qi.input_buffer_length))) {
1350 rc = -EFAULT;
1351 goto iqinf_exit;
1352 }
1353 if (copy_to_user(pqi + 1, rsp->Buffer, qi.input_buffer_length)) {
1354 rc = -EFAULT;
1355 goto iqinf_exit;
1356 }
1357
1358 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001359 kfree(buffer);
1360 SMB2_open_free(&rqst[0]);
1361 SMB2_query_info_free(&rqst[1]);
1362 SMB2_close_free(&rqst[2]);
1363 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1364 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1365 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001366 return rc;
1367}
1368
Sachin Prabhu620d8742017-02-10 16:03:51 +05301369static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001370smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001371 struct cifsFileInfo *srcfile,
1372 struct cifsFileInfo *trgtfile, u64 src_off,
1373 u64 len, u64 dest_off)
1374{
1375 int rc;
1376 unsigned int ret_data_len;
1377 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001378 struct copychunk_ioctl_rsp *retbuf = NULL;
1379 struct cifs_tcon *tcon;
1380 int chunks_copied = 0;
1381 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301382 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001383
1384 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1385
1386 if (pcchunk == NULL)
1387 return -ENOMEM;
1388
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001389 cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
Steve French41c13582013-11-14 00:05:36 -06001390 /* Request a key from the server to identify the source of the copy */
1391 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1392 srcfile->fid.persistent_fid,
1393 srcfile->fid.volatile_fid, pcchunk);
1394
1395 /* Note: request_res_key sets res_key null only if rc !=0 */
1396 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001397 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001398
1399 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001400 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001401 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001402 pcchunk->Reserved2 = 0;
1403
Steve French9bf0c9c2013-11-16 18:05:28 -06001404 tcon = tlink_tcon(trgtfile->tlink);
1405
1406 while (len > 0) {
1407 pcchunk->SourceOffset = cpu_to_le64(src_off);
1408 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1409 pcchunk->Length =
1410 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1411
1412 /* Request server copy to target from src identified by key */
1413 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001414 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001415 true /* is_fsctl */, (char *)pcchunk,
Steve French9bf0c9c2013-11-16 18:05:28 -06001416 sizeof(struct copychunk_ioctl), (char **)&retbuf,
1417 &ret_data_len);
1418 if (rc == 0) {
1419 if (ret_data_len !=
1420 sizeof(struct copychunk_ioctl_rsp)) {
1421 cifs_dbg(VFS, "invalid cchunk response size\n");
1422 rc = -EIO;
1423 goto cchunk_out;
1424 }
1425 if (retbuf->TotalBytesWritten == 0) {
1426 cifs_dbg(FYI, "no bytes copied\n");
1427 rc = -EIO;
1428 goto cchunk_out;
1429 }
1430 /*
1431 * Check if server claimed to write more than we asked
1432 */
1433 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1434 le32_to_cpu(pcchunk->Length)) {
1435 cifs_dbg(VFS, "invalid copy chunk response\n");
1436 rc = -EIO;
1437 goto cchunk_out;
1438 }
1439 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
1440 cifs_dbg(VFS, "invalid num chunks written\n");
1441 rc = -EIO;
1442 goto cchunk_out;
1443 }
1444 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001445
Sachin Prabhu620d8742017-02-10 16:03:51 +05301446 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1447 src_off += bytes_written;
1448 dest_off += bytes_written;
1449 len -= bytes_written;
1450 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001451
Sachin Prabhu620d8742017-02-10 16:03:51 +05301452 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001453 le32_to_cpu(retbuf->ChunksWritten),
1454 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301455 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001456 } else if (rc == -EINVAL) {
1457 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1458 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001459
Steve French9bf0c9c2013-11-16 18:05:28 -06001460 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1461 le32_to_cpu(retbuf->ChunksWritten),
1462 le32_to_cpu(retbuf->ChunkBytesWritten),
1463 le32_to_cpu(retbuf->TotalBytesWritten));
1464
1465 /*
1466 * Check if this is the first request using these sizes,
1467 * (ie check if copy succeed once with original sizes
1468 * and check if the server gave us different sizes after
1469 * we already updated max sizes on previous request).
1470 * if not then why is the server returning an error now
1471 */
1472 if ((chunks_copied != 0) || chunk_sizes_updated)
1473 goto cchunk_out;
1474
1475 /* Check that server is not asking us to grow size */
1476 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1477 tcon->max_bytes_chunk)
1478 tcon->max_bytes_chunk =
1479 le32_to_cpu(retbuf->ChunkBytesWritten);
1480 else
1481 goto cchunk_out; /* server gave us bogus size */
1482
1483 /* No need to change MaxChunks since already set to 1 */
1484 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001485 } else
1486 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001487 }
1488
1489cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001490 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001491 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301492 if (rc)
1493 return rc;
1494 else
1495 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001496}
1497
1498static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001499smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1500 struct cifs_fid *fid)
1501{
1502 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1503}
1504
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001505static unsigned int
1506smb2_read_data_offset(char *buf)
1507{
1508 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
1509 return rsp->DataOffset;
1510}
1511
1512static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001513smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001514{
1515 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001516
1517 if (in_remaining)
1518 return le32_to_cpu(rsp->DataRemaining);
1519
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001520 return le32_to_cpu(rsp->DataLength);
1521}
1522
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001523
1524static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001525smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001526 struct cifs_io_parms *parms, unsigned int *bytes_read,
1527 char **buf, int *buf_type)
1528{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001529 parms->persistent_fid = pfid->persistent_fid;
1530 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001531 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1532}
1533
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001534static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001535smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001536 struct cifs_io_parms *parms, unsigned int *written,
1537 struct kvec *iov, unsigned long nr_segs)
1538{
1539
Steve Frenchdb8b6312014-09-22 05:13:55 -05001540 parms->persistent_fid = pfid->persistent_fid;
1541 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001542 return SMB2_write(xid, parms, written, iov, nr_segs);
1543}
1544
Steve Frenchd43cc792014-08-13 17:16:29 -05001545/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1546static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1547 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1548{
1549 struct cifsInodeInfo *cifsi;
1550 int rc;
1551
1552 cifsi = CIFS_I(inode);
1553
1554 /* if file already sparse don't bother setting sparse again */
1555 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1556 return true; /* already sparse */
1557
1558 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1559 return true; /* already not sparse */
1560
1561 /*
1562 * Can't check for sparse support on share the usual way via the
1563 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1564 * since Samba server doesn't set the flag on the share, yet
1565 * supports the set sparse FSCTL and returns sparse correctly
1566 * in the file attributes. If we fail setting sparse though we
1567 * mark that server does not support sparse files for this share
1568 * to avoid repeatedly sending the unsupported fsctl to server
1569 * if the file is repeatedly extended.
1570 */
1571 if (tcon->broken_sparse_sup)
1572 return false;
1573
1574 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1575 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001576 true /* is_fctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001577 &setsparse, 1, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001578 if (rc) {
1579 tcon->broken_sparse_sup = true;
1580 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1581 return false;
1582 }
1583
1584 if (setsparse)
1585 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1586 else
1587 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1588
1589 return true;
1590}
1591
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001592static int
1593smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1594 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1595{
1596 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001597 struct inode *inode;
1598
1599 /*
1600 * If extending file more than one page make sparse. Many Linux fs
1601 * make files sparse by default when extending via ftruncate
1602 */
David Howells2b0143b2015-03-17 22:25:59 +00001603 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001604
1605 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001606 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001607
Steve Frenchd43cc792014-08-13 17:16:29 -05001608 /* whether set sparse succeeds or not, extend the file */
1609 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001610 }
1611
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001612 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001613 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001614}
1615
Steve French02b16662015-06-27 21:18:36 -07001616static int
1617smb2_duplicate_extents(const unsigned int xid,
1618 struct cifsFileInfo *srcfile,
1619 struct cifsFileInfo *trgtfile, u64 src_off,
1620 u64 len, u64 dest_off)
1621{
1622 int rc;
1623 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001624 struct duplicate_extents_to_file dup_ext_buf;
1625 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1626
1627 /* server fileays advertise duplicate extent support with this flag */
1628 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1629 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1630 return -EOPNOTSUPP;
1631
1632 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1633 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1634 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1635 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1636 dup_ext_buf.ByteCount = cpu_to_le64(len);
1637 cifs_dbg(FYI, "duplicate extents: src off %lld dst off %lld len %lld",
1638 src_off, dest_off, len);
1639
1640 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1641 if (rc)
1642 goto duplicate_extents_out;
1643
1644 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1645 trgtfile->fid.volatile_fid,
1646 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001647 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001648 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001649 sizeof(struct duplicate_extents_to_file),
Steve French24df1482016-09-29 04:20:23 -05001650 NULL,
Steve French02b16662015-06-27 21:18:36 -07001651 &ret_data_len);
1652
1653 if (ret_data_len > 0)
1654 cifs_dbg(FYI, "non-zero response length in duplicate extents");
1655
1656duplicate_extents_out:
1657 return rc;
1658}
Steve French02b16662015-06-27 21:18:36 -07001659
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001660static int
Steve French64a5cfa2013-10-14 15:31:32 -05001661smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1662 struct cifsFileInfo *cfile)
1663{
1664 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1665 cfile->fid.volatile_fid);
1666}
1667
1668static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001669smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1670 struct cifsFileInfo *cfile)
1671{
1672 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001673 unsigned int ret_data_len;
1674
1675 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1676 integr_info.Flags = 0;
1677 integr_info.Reserved = 0;
1678
1679 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1680 cfile->fid.volatile_fid,
1681 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001682 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001683 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001684 sizeof(struct fsctl_set_integrity_information_req),
Steve French24df1482016-09-29 04:20:23 -05001685 NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001686 &ret_data_len);
1687
1688}
1689
Steve Frenche02789a2018-08-09 14:33:12 -05001690/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1691#define GMT_TOKEN_SIZE 50
1692
1693/*
1694 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1695 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1696 */
Steve Frenchb3152e22015-06-24 03:17:02 -05001697static int
Steve French834170c2016-09-30 21:14:26 -05001698smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1699 struct cifsFileInfo *cfile, void __user *ioc_buf)
1700{
1701 char *retbuf = NULL;
1702 unsigned int ret_data_len = 0;
1703 int rc;
1704 struct smb_snapshot_array snapshot_in;
1705
1706 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1707 cfile->fid.volatile_fid,
1708 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001709 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001710 NULL, 0 /* no input data */,
Steve French834170c2016-09-30 21:14:26 -05001711 (char **)&retbuf,
1712 &ret_data_len);
1713 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
1714 rc, ret_data_len);
1715 if (rc)
1716 return rc;
1717
1718 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
1719 /* Fixup buffer */
1720 if (copy_from_user(&snapshot_in, ioc_buf,
1721 sizeof(struct smb_snapshot_array))) {
1722 rc = -EFAULT;
1723 kfree(retbuf);
1724 return rc;
1725 }
Steve French834170c2016-09-30 21:14:26 -05001726
Steve Frenche02789a2018-08-09 14:33:12 -05001727 /*
1728 * Check for min size, ie not large enough to fit even one GMT
1729 * token (snapshot). On the first ioctl some users may pass in
1730 * smaller size (or zero) to simply get the size of the array
1731 * so the user space caller can allocate sufficient memory
1732 * and retry the ioctl again with larger array size sufficient
1733 * to hold all of the snapshot GMT tokens on the second try.
1734 */
1735 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
1736 ret_data_len = sizeof(struct smb_snapshot_array);
1737
1738 /*
1739 * We return struct SRV_SNAPSHOT_ARRAY, followed by
1740 * the snapshot array (of 50 byte GMT tokens) each
1741 * representing an available previous version of the data
1742 */
1743 if (ret_data_len > (snapshot_in.snapshot_array_size +
1744 sizeof(struct smb_snapshot_array)))
1745 ret_data_len = snapshot_in.snapshot_array_size +
1746 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05001747
1748 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
1749 rc = -EFAULT;
1750 }
1751
1752 kfree(retbuf);
1753 return rc;
1754}
1755
1756static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001757smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1758 const char *path, struct cifs_sb_info *cifs_sb,
1759 struct cifs_fid *fid, __u16 search_flags,
1760 struct cifs_search_info *srch_inf)
1761{
1762 __le16 *utf16_path;
1763 int rc;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001764 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001765 struct cifs_open_parms oparms;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001766
1767 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1768 if (!utf16_path)
1769 return -ENOMEM;
1770
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001771 oparms.tcon = tcon;
1772 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
1773 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001774 if (backup_cred(cifs_sb))
1775 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1776 else
1777 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001778 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001779 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001780
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10001781 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001782 kfree(utf16_path);
1783 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001784 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001785 return rc;
1786 }
1787
1788 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02001789 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001790
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001791 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
1792 fid->volatile_fid, 0, srch_inf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001793 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001794 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001795 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001796 }
1797 return rc;
1798}
1799
1800static int
1801smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
1802 struct cifs_fid *fid, __u16 search_flags,
1803 struct cifs_search_info *srch_inf)
1804{
1805 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
1806 fid->volatile_fid, 0, srch_inf);
1807}
1808
1809static int
1810smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
1811 struct cifs_fid *fid)
1812{
1813 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1814}
1815
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001816/*
1817* If we negotiate SMB2 protocol and get STATUS_PENDING - update
1818* the number of credits and return true. Otherwise - return false.
1819*/
1820static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08001821smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001822{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001823 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001824
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001825 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001826 return false;
1827
Pavel Shilovsky66265f12019-01-23 17:11:16 -08001828 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001829 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001830 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001831 spin_unlock(&server->req_lock);
1832 wake_up(&server->request_q);
1833 }
1834
1835 return true;
1836}
1837
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001838static bool
1839smb2_is_session_expired(char *buf)
1840{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001841 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001842
Mark Symsd81243c2018-05-24 09:47:31 +01001843 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
1844 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001845 return false;
1846
Steve Frenche68a9322018-07-30 14:23:58 -05001847 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
1848 le16_to_cpu(shdr->Command),
1849 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01001850 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05001851
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001852 return true;
1853}
1854
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001855static int
1856smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
1857 struct cifsInodeInfo *cinode)
1858{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07001859 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
1860 return SMB2_lease_break(0, tcon, cinode->lease_key,
1861 smb2_get_lease_state(cinode));
1862
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001863 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
1864 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001865 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001866}
1867
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10001868void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001869smb2_set_related(struct smb_rqst *rqst)
1870{
1871 struct smb2_sync_hdr *shdr;
1872
1873 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
1874 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
1875}
1876
1877char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
1878
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10001879void
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001880smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001881{
1882 struct smb2_sync_hdr *shdr;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001883 struct cifs_ses *ses = tcon->ses;
1884 struct TCP_Server_Info *server = ses->server;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001885 unsigned long len = smb_rqst_len(server, rqst);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001886 int i, num_padding;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001887
1888 /* SMB headers in a compound are 8 byte aligned. */
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001889
1890 /* No padding needed */
1891 if (!(len & 7))
1892 goto finished;
1893
1894 num_padding = 8 - (len & 7);
1895 if (!smb3_encryption_required(tcon)) {
1896 /*
1897 * If we do not have encryption then we can just add an extra
1898 * iov for the padding.
1899 */
1900 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
1901 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
1902 rqst->rq_nvec++;
1903 len += num_padding;
1904 } else {
1905 /*
1906 * We can not add a small padding iov for the encryption case
1907 * because the encryption framework can not handle the padding
1908 * iovs.
1909 * We have to flatten this into a single buffer and add
1910 * the padding to it.
1911 */
1912 for (i = 1; i < rqst->rq_nvec; i++) {
1913 memcpy(rqst->rq_iov[0].iov_base +
1914 rqst->rq_iov[0].iov_len,
1915 rqst->rq_iov[i].iov_base,
1916 rqst->rq_iov[i].iov_len);
1917 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06001918 }
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001919 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
1920 0, num_padding);
1921 rqst->rq_iov[0].iov_len += num_padding;
1922 len += num_padding;
1923 rqst->rq_nvec = 1;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001924 }
1925
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001926 finished:
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001927 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
1928 shdr->NextCommand = cpu_to_le32(len);
1929}
1930
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001931/*
1932 * Passes the query info response back to the caller on success.
1933 * Caller need to free this with free_rsp_buf().
1934 */
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001935int
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001936smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
1937 __le16 *utf16_path, u32 desired_access,
1938 u32 class, u32 type, u32 output_len,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001939 struct kvec *rsp, int *buftype,
1940 struct cifs_sb_info *cifs_sb)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001941{
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001942 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001943 int flags = 0;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001944 struct smb_rqst rqst[3];
1945 int resp_buftype[3];
1946 struct kvec rsp_iov[3];
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10001947 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001948 struct kvec qi_iov[1];
1949 struct kvec close_iov[1];
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001950 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001951 struct cifs_open_parms oparms;
1952 struct cifs_fid fid;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001953 int rc;
1954
1955 if (smb3_encryption_required(tcon))
1956 flags |= CIFS_TRANSFORM_REQ;
1957
1958 memset(rqst, 0, sizeof(rqst));
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10001959 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001960 memset(rsp_iov, 0, sizeof(rsp_iov));
1961
1962 memset(&open_iov, 0, sizeof(open_iov));
1963 rqst[0].rq_iov = open_iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10001964 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001965
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001966 oparms.tcon = tcon;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001967 oparms.desired_access = desired_access;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001968 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001969 if (cifs_sb && backup_cred(cifs_sb))
1970 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1971 else
1972 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001973 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001974 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001975
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001976 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001977 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001978 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001979 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001980
1981 memset(&qi_iov, 0, sizeof(qi_iov));
1982 rqst[1].rq_iov = qi_iov;
1983 rqst[1].rq_nvec = 1;
1984
1985 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001986 class, type, 0,
1987 output_len, 0,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001988 NULL);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001989 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001990 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001991 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001992 smb2_set_related(&rqst[1]);
1993
1994 memset(&close_iov, 0, sizeof(close_iov));
1995 rqst[2].rq_iov = close_iov;
1996 rqst[2].rq_nvec = 1;
1997
1998 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
1999 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002000 goto qic_exit;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002001 smb2_set_related(&rqst[2]);
2002
2003 rc = compound_send_recv(xid, ses, flags, 3, rqst,
2004 resp_buftype, rsp_iov);
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002005 if (rc) {
2006 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002007 goto qic_exit;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002008 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002009 *rsp = rsp_iov[1];
2010 *buftype = resp_buftype[1];
2011
2012 qic_exit:
2013 SMB2_open_free(&rqst[0]);
2014 SMB2_query_info_free(&rqst[1]);
2015 SMB2_close_free(&rqst[2]);
2016 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2017 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2018 return rc;
2019}
2020
2021static int
2022smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2023 struct kstatfs *buf)
2024{
2025 struct smb2_query_info_rsp *rsp;
2026 struct smb2_fs_full_size_info *info = NULL;
2027 __le16 utf16_path = 0; /* Null - open root of share */
2028 struct kvec rsp_iov = {NULL, 0};
2029 int buftype = CIFS_NO_BUFFER;
2030 int rc;
2031
2032
2033 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2034 FILE_READ_ATTRIBUTES,
2035 FS_FULL_SIZE_INFORMATION,
2036 SMB2_O_INFO_FILESYSTEM,
2037 sizeof(struct smb2_fs_full_size_info),
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002038 &rsp_iov, &buftype, NULL);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002039 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002040 goto qfs_exit;
2041
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002042 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002043 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002044 info = (struct smb2_fs_full_size_info *)(
2045 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2046 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2047 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002048 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002049 sizeof(struct smb2_fs_full_size_info));
2050 if (!rc)
2051 smb2_copy_fs_info_to_kstatfs(info, buf);
2052
2053qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002054 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002055 return rc;
2056}
2057
Steve French2d304212018-06-24 23:28:12 -05002058static int
2059smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2060 struct kstatfs *buf)
2061{
2062 int rc;
2063 __le16 srch_path = 0; /* Null - open root of share */
2064 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2065 struct cifs_open_parms oparms;
2066 struct cifs_fid fid;
2067
2068 if (!tcon->posix_extensions)
2069 return smb2_queryfs(xid, tcon, buf);
2070
2071 oparms.tcon = tcon;
2072 oparms.desired_access = FILE_READ_ATTRIBUTES;
2073 oparms.disposition = FILE_OPEN;
2074 oparms.create_options = 0;
2075 oparms.fid = &fid;
2076 oparms.reconnect = false;
2077
2078 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2079 if (rc)
2080 return rc;
2081
2082 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2083 fid.volatile_fid, buf);
2084 buf->f_type = SMB2_MAGIC_NUMBER;
2085 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2086 return rc;
2087}
Steve French2d304212018-06-24 23:28:12 -05002088
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002089static bool
2090smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2091{
2092 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2093 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2094}
2095
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002096static int
2097smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2098 __u64 length, __u32 type, int lock, int unlock, bool wait)
2099{
2100 if (unlock && !lock)
2101 type = SMB2_LOCKFLAG_UNLOCK;
2102 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2103 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2104 current->tgid, length, offset, type, wait);
2105}
2106
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002107static void
2108smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2109{
2110 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2111}
2112
2113static void
2114smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2115{
2116 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2117}
2118
2119static void
2120smb2_new_lease_key(struct cifs_fid *fid)
2121{
Steve Frenchfa70b872016-09-22 00:39:34 -05002122 generate_random_uuid(fid->lease_key);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002123}
2124
Aurelien Aptel9d496402017-02-13 16:16:49 +01002125static int
2126smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2127 const char *search_name,
2128 struct dfs_info3_param **target_nodes,
2129 unsigned int *num_of_nodes,
2130 const struct nls_table *nls_codepage, int remap)
2131{
2132 int rc;
2133 __le16 *utf16_path = NULL;
2134 int utf16_path_len = 0;
2135 struct cifs_tcon *tcon;
2136 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2137 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2138 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2139
2140 cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name);
2141
2142 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002143 * Try to use the IPC tcon, otherwise just use any
Aurelien Aptel9d496402017-02-13 16:16:49 +01002144 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002145 tcon = ses->tcon_ipc;
2146 if (tcon == NULL) {
2147 spin_lock(&cifs_tcp_ses_lock);
2148 tcon = list_first_entry_or_null(&ses->tcon_list,
2149 struct cifs_tcon,
2150 tcon_list);
2151 if (tcon)
2152 tcon->tc_count++;
2153 spin_unlock(&cifs_tcp_ses_lock);
2154 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002155
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002156 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002157 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2158 ses);
2159 rc = -ENOTCONN;
2160 goto out;
2161 }
2162
2163 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2164 &utf16_path_len,
2165 nls_codepage, remap);
2166 if (!utf16_path) {
2167 rc = -ENOMEM;
2168 goto out;
2169 }
2170
2171 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2172 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2173 if (!dfs_req) {
2174 rc = -ENOMEM;
2175 goto out;
2176 }
2177
2178 /* Highest DFS referral version understood */
2179 dfs_req->MaxReferralLevel = DFS_VERSION;
2180
2181 /* Path to resolve in an UTF-16 null-terminated string */
2182 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2183
2184 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002185 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2186 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002187 true /* is_fsctl */,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002188 (char *)dfs_req, dfs_req_size,
2189 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002190 } while (rc == -EAGAIN);
2191
2192 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002193 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Aurelien Aptel57025912017-11-21 14:47:56 +01002194 cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002195 goto out;
2196 }
2197
2198 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2199 num_of_nodes, target_nodes,
2200 nls_codepage, remap, search_name,
2201 true /* is_unicode */);
2202 if (rc) {
2203 cifs_dbg(VFS, "parse error in smb2_get_dfs_refer rc=%d\n", rc);
2204 goto out;
2205 }
2206
2207 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002208 if (tcon && !tcon->ipc) {
2209 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002210 spin_lock(&cifs_tcp_ses_lock);
2211 tcon->tc_count--;
2212 spin_unlock(&cifs_tcp_ses_lock);
2213 }
2214 kfree(utf16_path);
2215 kfree(dfs_req);
2216 kfree(dfs_rsp);
2217 return rc;
2218}
Pavel Shilovsky78932422016-07-24 10:37:38 +03002219#define SMB2_SYMLINK_STRUCT_SIZE \
2220 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2221
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002222static int
2223smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2224 const char *full_path, char **target_path,
2225 struct cifs_sb_info *cifs_sb)
2226{
2227 int rc;
2228 __le16 *utf16_path;
2229 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2230 struct cifs_open_parms oparms;
2231 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002232 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002233 struct smb2_err_rsp *err_buf = NULL;
2234 int resp_buftype;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002235 struct smb2_symlink_err_rsp *symlink;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002236 unsigned int sub_len;
2237 unsigned int sub_offset;
2238 unsigned int print_len;
2239 unsigned int print_offset;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002240
2241 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2242
2243 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2244 if (!utf16_path)
2245 return -ENOMEM;
2246
2247 oparms.tcon = tcon;
2248 oparms.desired_access = FILE_READ_ATTRIBUTES;
2249 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05002250 if (backup_cred(cifs_sb))
2251 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2252 else
2253 oparms.create_options = 0;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002254 oparms.fid = &fid;
2255 oparms.reconnect = false;
2256
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002257 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
2258 &resp_buftype);
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002259 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002260 rc = -ENOENT;
Dan Carpenterff361fd2018-06-19 15:25:30 +03002261 goto free_path;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002262 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002263
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002264 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002265 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002266 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002267 rc = -ENOENT;
2268 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002269 }
2270
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002271 /* open must fail on symlink - reset rc */
2272 rc = 0;
2273 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2274 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2275 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002276 print_len = le16_to_cpu(symlink->PrintNameLength);
2277 print_offset = le16_to_cpu(symlink->PrintNameOffset);
2278
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002279 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002280 rc = -ENOENT;
2281 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002282 }
2283
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002284 if (err_iov.iov_len <
2285 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002286 rc = -ENOENT;
2287 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002288 }
2289
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002290 *target_path = cifs_strndup_from_utf16(
2291 (char *)symlink->PathBuffer + sub_offset,
2292 sub_len, true, cifs_sb->local_nls);
2293 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002294 rc = -ENOMEM;
2295 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002296 }
2297 convert_delimiter(*target_path, '/');
2298 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002299
2300 querty_exit:
2301 free_rsp_buf(resp_buftype, err_buf);
Dan Carpenterff361fd2018-06-19 15:25:30 +03002302 free_path:
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002303 kfree(utf16_path);
2304 return rc;
2305}
2306
Arnd Bergmann84908422017-06-27 17:06:13 +02002307#ifdef CONFIG_CIFS_ACL
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002308static struct cifs_ntsd *
2309get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2310 const struct cifs_fid *cifsfid, u32 *pacllen)
2311{
2312 struct cifs_ntsd *pntsd = NULL;
2313 unsigned int xid;
2314 int rc = -EOPNOTSUPP;
2315 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2316
2317 if (IS_ERR(tlink))
2318 return ERR_CAST(tlink);
2319
2320 xid = get_xid();
2321 cifs_dbg(FYI, "trying to get acl\n");
2322
2323 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2324 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2325 free_xid(xid);
2326
2327 cifs_put_tlink(tlink);
2328
2329 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2330 if (rc)
2331 return ERR_PTR(rc);
2332 return pntsd;
2333
2334}
2335
2336static struct cifs_ntsd *
2337get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2338 const char *path, u32 *pacllen)
2339{
2340 struct cifs_ntsd *pntsd = NULL;
2341 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2342 unsigned int xid;
2343 int rc;
2344 struct cifs_tcon *tcon;
2345 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2346 struct cifs_fid fid;
2347 struct cifs_open_parms oparms;
2348 __le16 *utf16_path;
2349
2350 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2351 if (IS_ERR(tlink))
2352 return ERR_CAST(tlink);
2353
2354 tcon = tlink_tcon(tlink);
2355 xid = get_xid();
2356
2357 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002358 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002359 else
2360 oparms.create_options = 0;
2361
2362 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002363 if (!utf16_path) {
2364 rc = -ENOMEM;
2365 free_xid(xid);
2366 return ERR_PTR(rc);
2367 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002368
2369 oparms.tcon = tcon;
2370 oparms.desired_access = READ_CONTROL;
2371 oparms.disposition = FILE_OPEN;
2372 oparms.fid = &fid;
2373 oparms.reconnect = false;
2374
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002375 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002376 kfree(utf16_path);
2377 if (!rc) {
2378 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2379 fid.volatile_fid, (void **)&pntsd, pacllen);
2380 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2381 }
2382
2383 cifs_put_tlink(tlink);
2384 free_xid(xid);
2385
2386 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2387 if (rc)
2388 return ERR_PTR(rc);
2389 return pntsd;
2390}
2391
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002392#ifdef CONFIG_CIFS_ACL
2393static int
2394set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2395 struct inode *inode, const char *path, int aclflag)
2396{
2397 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2398 unsigned int xid;
2399 int rc, access_flags = 0;
2400 struct cifs_tcon *tcon;
2401 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2402 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2403 struct cifs_fid fid;
2404 struct cifs_open_parms oparms;
2405 __le16 *utf16_path;
2406
2407 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2408 if (IS_ERR(tlink))
2409 return PTR_ERR(tlink);
2410
2411 tcon = tlink_tcon(tlink);
2412 xid = get_xid();
2413
2414 if (backup_cred(cifs_sb))
2415 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2416 else
2417 oparms.create_options = 0;
2418
2419 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2420 access_flags = WRITE_OWNER;
2421 else
2422 access_flags = WRITE_DAC;
2423
2424 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002425 if (!utf16_path) {
2426 rc = -ENOMEM;
2427 free_xid(xid);
2428 return rc;
2429 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002430
2431 oparms.tcon = tcon;
2432 oparms.desired_access = access_flags;
2433 oparms.disposition = FILE_OPEN;
2434 oparms.path = path;
2435 oparms.fid = &fid;
2436 oparms.reconnect = false;
2437
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002438 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002439 kfree(utf16_path);
2440 if (!rc) {
2441 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2442 fid.volatile_fid, pnntsd, acllen, aclflag);
2443 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2444 }
2445
2446 cifs_put_tlink(tlink);
2447 free_xid(xid);
2448 return rc;
2449}
2450#endif /* CIFS_ACL */
2451
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002452/* Retrieve an ACL from the server */
2453static struct cifs_ntsd *
2454get_smb2_acl(struct cifs_sb_info *cifs_sb,
2455 struct inode *inode, const char *path,
2456 u32 *pacllen)
2457{
2458 struct cifs_ntsd *pntsd = NULL;
2459 struct cifsFileInfo *open_file = NULL;
2460
2461 if (inode)
2462 open_file = find_readable_file(CIFS_I(inode), true);
2463 if (!open_file)
2464 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2465
2466 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2467 cifsFileInfo_put(open_file);
2468 return pntsd;
2469}
Arnd Bergmann84908422017-06-27 17:06:13 +02002470#endif
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002471
Steve French30175622014-08-17 18:16:40 -05002472static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2473 loff_t offset, loff_t len, bool keep_size)
2474{
2475 struct inode *inode;
2476 struct cifsInodeInfo *cifsi;
2477 struct cifsFileInfo *cfile = file->private_data;
2478 struct file_zero_data_information fsctl_buf;
2479 long rc;
2480 unsigned int xid;
2481
2482 xid = get_xid();
2483
David Howells2b0143b2015-03-17 22:25:59 +00002484 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05002485 cifsi = CIFS_I(inode);
2486
2487 /* if file not oplocked can't be sure whether asking to extend size */
2488 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002489 if (keep_size == false) {
2490 rc = -EOPNOTSUPP;
2491 free_xid(xid);
2492 return rc;
2493 }
Steve French30175622014-08-17 18:16:40 -05002494
Steve French2bb93d22014-08-20 18:56:29 -05002495 /*
Steve French30175622014-08-17 18:16:40 -05002496 * Must check if file sparse since fallocate -z (zero range) assumes
2497 * non-sparse allocation
2498 */
Steve Frenchcfe89092018-05-19 02:04:55 -05002499 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
2500 rc = -EOPNOTSUPP;
2501 free_xid(xid);
2502 return rc;
2503 }
Steve French30175622014-08-17 18:16:40 -05002504
2505 /*
2506 * need to make sure we are not asked to extend the file since the SMB3
2507 * fsctl does not change the file size. In the future we could change
2508 * this to zero the first part of the range then set the file size
2509 * which for a non sparse file would zero the newly extended range
2510 */
2511 if (keep_size == false)
Steve Frenchcfe89092018-05-19 02:04:55 -05002512 if (i_size_read(inode) < offset + len) {
2513 rc = -EOPNOTSUPP;
2514 free_xid(xid);
2515 return rc;
2516 }
Steve French30175622014-08-17 18:16:40 -05002517
2518 cifs_dbg(FYI, "offset %lld len %lld", offset, len);
2519
2520 fsctl_buf.FileOffset = cpu_to_le64(offset);
2521 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2522
2523 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2524 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002525 true /* is_fctl */, (char *)&fsctl_buf,
Steve French30175622014-08-17 18:16:40 -05002526 sizeof(struct file_zero_data_information), NULL, NULL);
2527 free_xid(xid);
2528 return rc;
2529}
2530
Steve French31742c52014-08-17 08:38:47 -05002531static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2532 loff_t offset, loff_t len)
2533{
2534 struct inode *inode;
2535 struct cifsInodeInfo *cifsi;
2536 struct cifsFileInfo *cfile = file->private_data;
2537 struct file_zero_data_information fsctl_buf;
2538 long rc;
2539 unsigned int xid;
2540 __u8 set_sparse = 1;
2541
2542 xid = get_xid();
2543
David Howells2b0143b2015-03-17 22:25:59 +00002544 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002545 cifsi = CIFS_I(inode);
2546
2547 /* Need to make file sparse, if not already, before freeing range. */
2548 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002549 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2550 rc = -EOPNOTSUPP;
2551 free_xid(xid);
2552 return rc;
2553 }
Steve French31742c52014-08-17 08:38:47 -05002554
2555 cifs_dbg(FYI, "offset %lld len %lld", offset, len);
2556
2557 fsctl_buf.FileOffset = cpu_to_le64(offset);
2558 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2559
2560 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2561 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002562 true /* is_fctl */, (char *)&fsctl_buf,
Steve French31742c52014-08-17 08:38:47 -05002563 sizeof(struct file_zero_data_information), NULL, NULL);
2564 free_xid(xid);
2565 return rc;
2566}
2567
Steve French9ccf3212014-10-18 17:01:15 -05002568static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
2569 loff_t off, loff_t len, bool keep_size)
2570{
2571 struct inode *inode;
2572 struct cifsInodeInfo *cifsi;
2573 struct cifsFileInfo *cfile = file->private_data;
2574 long rc = -EOPNOTSUPP;
2575 unsigned int xid;
2576
2577 xid = get_xid();
2578
David Howells2b0143b2015-03-17 22:25:59 +00002579 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05002580 cifsi = CIFS_I(inode);
2581
2582 /* if file not oplocked can't be sure whether asking to extend size */
2583 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002584 if (keep_size == false) {
2585 free_xid(xid);
2586 return rc;
2587 }
Steve French9ccf3212014-10-18 17:01:15 -05002588
2589 /*
2590 * Files are non-sparse by default so falloc may be a no-op
2591 * Must check if file sparse. If not sparse, and not extending
2592 * then no need to do anything since file already allocated
2593 */
2594 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
2595 if (keep_size == true)
Steve Frenchcfe89092018-05-19 02:04:55 -05002596 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002597 /* check if extending file */
2598 else if (i_size_read(inode) >= off + len)
2599 /* not extending file and already not sparse */
Steve Frenchcfe89092018-05-19 02:04:55 -05002600 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002601 /* BB: in future add else clause to extend file */
2602 else
Steve Frenchcfe89092018-05-19 02:04:55 -05002603 rc = -EOPNOTSUPP;
2604 free_xid(xid);
2605 return rc;
Steve French9ccf3212014-10-18 17:01:15 -05002606 }
2607
2608 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
2609 /*
2610 * Check if falloc starts within first few pages of file
2611 * and ends within a few pages of the end of file to
2612 * ensure that most of file is being forced to be
2613 * fallocated now. If so then setting whole file sparse
2614 * ie potentially making a few extra pages at the beginning
2615 * or end of the file non-sparse via set_sparse is harmless.
2616 */
Steve Frenchcfe89092018-05-19 02:04:55 -05002617 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
2618 rc = -EOPNOTSUPP;
2619 free_xid(xid);
2620 return rc;
2621 }
Steve French9ccf3212014-10-18 17:01:15 -05002622
2623 rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
2624 }
2625 /* BB: else ... in future add code to extend file and set sparse */
2626
2627
2628 free_xid(xid);
2629 return rc;
2630}
2631
2632
Steve French31742c52014-08-17 08:38:47 -05002633static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
2634 loff_t off, loff_t len)
2635{
2636 /* KEEP_SIZE already checked for by do_fallocate */
2637 if (mode & FALLOC_FL_PUNCH_HOLE)
2638 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05002639 else if (mode & FALLOC_FL_ZERO_RANGE) {
2640 if (mode & FALLOC_FL_KEEP_SIZE)
2641 return smb3_zero_range(file, tcon, off, len, true);
2642 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05002643 } else if (mode == FALLOC_FL_KEEP_SIZE)
2644 return smb3_simple_falloc(file, tcon, off, len, true);
2645 else if (mode == 0)
2646 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05002647
2648 return -EOPNOTSUPP;
2649}
2650
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002651static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002652smb2_downgrade_oplock(struct TCP_Server_Info *server,
2653 struct cifsInodeInfo *cinode, bool set_level2)
2654{
2655 if (set_level2)
2656 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
2657 0, NULL);
2658 else
2659 server->ops->set_oplock_level(cinode, 0, 0, NULL);
2660}
2661
2662static void
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08002663smb21_downgrade_oplock(struct TCP_Server_Info *server,
2664 struct cifsInodeInfo *cinode, bool set_level2)
2665{
2666 server->ops->set_oplock_level(cinode,
2667 set_level2 ? SMB2_LEASE_READ_CACHING_HE :
2668 0, 0, NULL);
2669}
2670
2671static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002672smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2673 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002674{
2675 oplock &= 0xFF;
2676 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
2677 return;
2678 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002679 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002680 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
2681 &cinode->vfs_inode);
2682 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002683 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002684 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
2685 &cinode->vfs_inode);
2686 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
2687 cinode->oplock = CIFS_CACHE_READ_FLG;
2688 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
2689 &cinode->vfs_inode);
2690 } else
2691 cinode->oplock = 0;
2692}
2693
2694static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002695smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2696 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002697{
2698 char message[5] = {0};
2699
2700 oplock &= 0xFF;
2701 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
2702 return;
2703
2704 cinode->oplock = 0;
2705 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
2706 cinode->oplock |= CIFS_CACHE_READ_FLG;
2707 strcat(message, "R");
2708 }
2709 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
2710 cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
2711 strcat(message, "H");
2712 }
2713 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
2714 cinode->oplock |= CIFS_CACHE_WRITE_FLG;
2715 strcat(message, "W");
2716 }
2717 if (!cinode->oplock)
2718 strcat(message, "None");
2719 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
2720 &cinode->vfs_inode);
2721}
2722
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002723static void
2724smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2725 unsigned int epoch, bool *purge_cache)
2726{
2727 unsigned int old_oplock = cinode->oplock;
2728
2729 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
2730
2731 if (purge_cache) {
2732 *purge_cache = false;
2733 if (old_oplock == CIFS_CACHE_READ_FLG) {
2734 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
2735 (epoch - cinode->epoch > 0))
2736 *purge_cache = true;
2737 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
2738 (epoch - cinode->epoch > 1))
2739 *purge_cache = true;
2740 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
2741 (epoch - cinode->epoch > 1))
2742 *purge_cache = true;
2743 else if (cinode->oplock == 0 &&
2744 (epoch - cinode->epoch > 0))
2745 *purge_cache = true;
2746 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
2747 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
2748 (epoch - cinode->epoch > 0))
2749 *purge_cache = true;
2750 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
2751 (epoch - cinode->epoch > 1))
2752 *purge_cache = true;
2753 }
2754 cinode->epoch = epoch;
2755 }
2756}
2757
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002758static bool
2759smb2_is_read_op(__u32 oplock)
2760{
2761 return oplock == SMB2_OPLOCK_LEVEL_II;
2762}
2763
2764static bool
2765smb21_is_read_op(__u32 oplock)
2766{
2767 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
2768 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
2769}
2770
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002771static __le32
2772map_oplock_to_lease(u8 oplock)
2773{
2774 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
2775 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
2776 else if (oplock == SMB2_OPLOCK_LEVEL_II)
2777 return SMB2_LEASE_READ_CACHING;
2778 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
2779 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
2780 SMB2_LEASE_WRITE_CACHING;
2781 return 0;
2782}
2783
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002784static char *
2785smb2_create_lease_buf(u8 *lease_key, u8 oplock)
2786{
2787 struct create_lease *buf;
2788
2789 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
2790 if (!buf)
2791 return NULL;
2792
Stefano Brivio729c0c92018-07-05 15:10:02 +02002793 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002794 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002795
2796 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2797 (struct create_lease, lcontext));
2798 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
2799 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2800 (struct create_lease, Name));
2801 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07002802 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002803 buf->Name[0] = 'R';
2804 buf->Name[1] = 'q';
2805 buf->Name[2] = 'L';
2806 buf->Name[3] = 's';
2807 return (char *)buf;
2808}
2809
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002810static char *
2811smb3_create_lease_buf(u8 *lease_key, u8 oplock)
2812{
2813 struct create_lease_v2 *buf;
2814
2815 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
2816 if (!buf)
2817 return NULL;
2818
Stefano Brivio729c0c92018-07-05 15:10:02 +02002819 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002820 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
2821
2822 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2823 (struct create_lease_v2, lcontext));
2824 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
2825 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2826 (struct create_lease_v2, Name));
2827 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07002828 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002829 buf->Name[0] = 'R';
2830 buf->Name[1] = 'q';
2831 buf->Name[2] = 'L';
2832 buf->Name[3] = 's';
2833 return (char *)buf;
2834}
2835
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002836static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06002837smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002838{
2839 struct create_lease *lc = (struct create_lease *)buf;
2840
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002841 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002842 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
2843 return SMB2_OPLOCK_LEVEL_NOCHANGE;
2844 return le32_to_cpu(lc->lcontext.LeaseState);
2845}
2846
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002847static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06002848smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002849{
2850 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
2851
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002852 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002853 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
2854 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06002855 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02002856 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002857 return le32_to_cpu(lc->lcontext.LeaseState);
2858}
2859
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04002860static unsigned int
2861smb2_wp_retry_size(struct inode *inode)
2862{
2863 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
2864 SMB2_MAX_BUFFER_SIZE);
2865}
2866
Pavel Shilovsky52755802014-08-18 20:49:57 +04002867static bool
2868smb2_dir_needs_close(struct cifsFileInfo *cfile)
2869{
2870 return !cfile->invalidHandle;
2871}
2872
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002873static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10002874fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
2875 struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002876{
2877 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10002878 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002879
2880 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
2881 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
2882 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
2883 tr_hdr->Flags = cpu_to_le16(0x01);
2884 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
2885 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002886}
2887
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11002888/* We can not use the normal sg_set_buf() as we will sometimes pass a
2889 * stack object as buf.
2890 */
2891static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
2892 unsigned int buflen)
2893{
2894 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
2895}
2896
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002897/* Assumes the first rqst has a transform header as the first iov.
2898 * I.e.
2899 * rqst[0].rq_iov[0] is transform header
2900 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
2901 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10002902 */
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002903static struct scatterlist *
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002904init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002905{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002906 unsigned int sg_len;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002907 struct scatterlist *sg;
2908 unsigned int i;
2909 unsigned int j;
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002910 unsigned int idx = 0;
2911 int skip;
2912
2913 sg_len = 1;
2914 for (i = 0; i < num_rqst; i++)
2915 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002916
2917 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
2918 if (!sg)
2919 return NULL;
2920
2921 sg_init_table(sg, sg_len);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002922 for (i = 0; i < num_rqst; i++) {
2923 for (j = 0; j < rqst[i].rq_nvec; j++) {
2924 /*
2925 * The first rqst has a transform header where the
2926 * first 20 bytes are not part of the encrypted blob
2927 */
2928 skip = (i == 0) && (j == 0) ? 20 : 0;
2929 smb2_sg_set_buf(&sg[idx++],
2930 rqst[i].rq_iov[j].iov_base + skip,
2931 rqst[i].rq_iov[j].iov_len - skip);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002932 }
Steve Frenchd5f07fb2018-06-05 17:46:24 -05002933
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002934 for (j = 0; j < rqst[i].rq_npages; j++) {
2935 unsigned int len, offset;
2936
2937 rqst_page_get_length(&rqst[i], j, &len, &offset);
2938 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
2939 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002940 }
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002941 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002942 return sg;
2943}
2944
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08002945static int
2946smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
2947{
2948 struct cifs_ses *ses;
2949 u8 *ses_enc_key;
2950
2951 spin_lock(&cifs_tcp_ses_lock);
2952 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
2953 if (ses->Suid != ses_id)
2954 continue;
2955 ses_enc_key = enc ? ses->smb3encryptionkey :
2956 ses->smb3decryptionkey;
2957 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
2958 spin_unlock(&cifs_tcp_ses_lock);
2959 return 0;
2960 }
2961 spin_unlock(&cifs_tcp_ses_lock);
2962
2963 return 1;
2964}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002965/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10002966 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
2967 * iov[0] - transform header (associate data),
2968 * iov[1-N] - SMB2 header and pages - data to encrypt.
2969 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002970 * untouched.
2971 */
2972static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002973crypt_message(struct TCP_Server_Info *server, int num_rqst,
2974 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002975{
2976 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002977 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002978 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002979 int rc = 0;
2980 struct scatterlist *sg;
2981 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08002982 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002983 struct aead_request *req;
2984 char *iv;
2985 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01002986 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002987 struct crypto_aead *tfm;
2988 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
2989
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08002990 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
2991 if (rc) {
2992 cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
2993 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002994 return 0;
2995 }
2996
2997 rc = smb3_crypto_aead_allocate(server);
2998 if (rc) {
2999 cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
3000 return rc;
3001 }
3002
3003 tfm = enc ? server->secmech.ccmaesencrypt :
3004 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003005 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003006 if (rc) {
3007 cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
3008 return rc;
3009 }
3010
3011 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3012 if (rc) {
3013 cifs_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
3014 return rc;
3015 }
3016
3017 req = aead_request_alloc(tfm, GFP_KERNEL);
3018 if (!req) {
3019 cifs_dbg(VFS, "%s: Failed to alloc aead request", __func__);
3020 return -ENOMEM;
3021 }
3022
3023 if (!enc) {
3024 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3025 crypt_len += SMB2_SIGNATURE_SIZE;
3026 }
3027
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003028 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003029 if (!sg) {
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003030 cifs_dbg(VFS, "%s: Failed to init sg", __func__);
3031 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003032 goto free_req;
3033 }
3034
3035 iv_len = crypto_aead_ivsize(tfm);
3036 iv = kzalloc(iv_len, GFP_KERNEL);
3037 if (!iv) {
3038 cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003039 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003040 goto free_sg;
3041 }
3042 iv[0] = 3;
3043 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
3044
3045 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3046 aead_request_set_ad(req, assoc_data_len);
3047
3048 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003049 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003050
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003051 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3052 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003053
3054 if (!rc && enc)
3055 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3056
3057 kfree(iv);
3058free_sg:
3059 kfree(sg);
3060free_req:
3061 kfree(req);
3062 return rc;
3063}
3064
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003065void
3066smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003067{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003068 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003069
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003070 for (i = 0; i < num_rqst; i++) {
3071 if (rqst[i].rq_pages) {
3072 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3073 put_page(rqst[i].rq_pages[j]);
3074 kfree(rqst[i].rq_pages);
3075 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003076 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003077}
3078
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003079/*
3080 * This function will initialize new_rq and encrypt the content.
3081 * The first entry, new_rq[0], only contains a single iov which contains
3082 * a smb2_transform_hdr and is pre-allocated by the caller.
3083 * This function then populates new_rq[1+] with the content from olq_rq[0+].
3084 *
3085 * The end result is an array of smb_rqst structures where the first structure
3086 * only contains a single iov for the transform header which we then can pass
3087 * to crypt_message().
3088 *
3089 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3090 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3091 */
3092static int
3093smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3094 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003095{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003096 struct page **pages;
3097 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3098 unsigned int npages;
3099 unsigned int orig_len = 0;
3100 int i, j;
3101 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003102
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003103 for (i = 1; i < num_rqst; i++) {
3104 npages = old_rq[i - 1].rq_npages;
3105 pages = kmalloc_array(npages, sizeof(struct page *),
3106 GFP_KERNEL);
3107 if (!pages)
3108 goto err_free;
3109
3110 new_rq[i].rq_pages = pages;
3111 new_rq[i].rq_npages = npages;
3112 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3113 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3114 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3115 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3116 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3117
3118 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3119
3120 for (j = 0; j < npages; j++) {
3121 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3122 if (!pages[j])
3123 goto err_free;
3124 }
3125
3126 /* copy pages form the old */
3127 for (j = 0; j < npages; j++) {
3128 char *dst, *src;
3129 unsigned int offset, len;
3130
3131 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3132
3133 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3134 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3135
3136 memcpy(dst, src, len);
3137 kunmap(new_rq[i].rq_pages[j]);
3138 kunmap(old_rq[i - 1].rq_pages[j]);
3139 }
3140 }
3141
3142 /* fill the 1st iov with a transform header */
3143 fill_transform_hdr(tr_hdr, orig_len, old_rq);
3144
3145 rc = crypt_message(server, num_rqst, new_rq, 1);
3146 cifs_dbg(FYI, "encrypt message returned %d", rc);
3147 if (rc)
3148 goto err_free;
3149
3150 return rc;
3151
3152err_free:
3153 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3154 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003155}
3156
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003157static int
3158smb3_is_transform_hdr(void *buf)
3159{
3160 struct smb2_transform_hdr *trhdr = buf;
3161
3162 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3163}
3164
3165static int
3166decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3167 unsigned int buf_data_size, struct page **pages,
3168 unsigned int npages, unsigned int page_data_size)
3169{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003170 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003171 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003172 int rc;
3173
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003174 iov[0].iov_base = buf;
3175 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3176 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3177 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003178
3179 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003180 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003181 rqst.rq_pages = pages;
3182 rqst.rq_npages = npages;
3183 rqst.rq_pagesz = PAGE_SIZE;
3184 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3185
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003186 rc = crypt_message(server, 1, &rqst, 0);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003187 cifs_dbg(FYI, "decrypt message returned %d\n", rc);
3188
3189 if (rc)
3190 return rc;
3191
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003192 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003193
3194 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003195
3196 return rc;
3197}
3198
3199static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003200read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3201 unsigned int npages, unsigned int len)
3202{
3203 int i;
3204 int length;
3205
3206 for (i = 0; i < npages; i++) {
3207 struct page *page = pages[i];
3208 size_t n;
3209
3210 n = len;
3211 if (len >= PAGE_SIZE) {
3212 /* enough data to fill the page */
3213 n = PAGE_SIZE;
3214 len -= n;
3215 } else {
3216 zero_user(page, len, PAGE_SIZE - len);
3217 len = 0;
3218 }
Long Li1dbe3462018-05-30 12:47:55 -07003219 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003220 if (length < 0)
3221 return length;
3222 server->total_read += length;
3223 }
3224
3225 return 0;
3226}
3227
3228static int
3229init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3230 unsigned int cur_off, struct bio_vec **page_vec)
3231{
3232 struct bio_vec *bvec;
3233 int i;
3234
3235 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3236 if (!bvec)
3237 return -ENOMEM;
3238
3239 for (i = 0; i < npages; i++) {
3240 bvec[i].bv_page = pages[i];
3241 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3242 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3243 data_size -= bvec[i].bv_len;
3244 }
3245
3246 if (data_size != 0) {
3247 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3248 kfree(bvec);
3249 return -EIO;
3250 }
3251
3252 *page_vec = bvec;
3253 return 0;
3254}
3255
3256static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003257handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3258 char *buf, unsigned int buf_len, struct page **pages,
3259 unsigned int npages, unsigned int page_data_size)
3260{
3261 unsigned int data_offset;
3262 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003263 unsigned int cur_off;
3264 unsigned int cur_page_idx;
3265 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003266 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10003267 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003268 struct bio_vec *bvec = NULL;
3269 struct iov_iter iter;
3270 struct kvec iov;
3271 int length;
Long Li74dcf412017-11-22 17:38:46 -07003272 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003273
3274 if (shdr->Command != SMB2_READ) {
3275 cifs_dbg(VFS, "only big read responses are supported\n");
3276 return -ENOTSUPP;
3277 }
3278
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003279 if (server->ops->is_session_expired &&
3280 server->ops->is_session_expired(buf)) {
3281 cifs_reconnect(server);
3282 wake_up(&server->response_q);
3283 return -1;
3284 }
3285
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003286 if (server->ops->is_status_pending &&
Pavel Shilovsky66265f12019-01-23 17:11:16 -08003287 server->ops->is_status_pending(buf, server))
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003288 return -1;
3289
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003290 /* set up first two iov to get credits */
3291 rdata->iov[0].iov_base = buf;
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003292 rdata->iov[0].iov_len = 0;
3293 rdata->iov[1].iov_base = buf;
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003294 rdata->iov[1].iov_len =
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003295 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003296 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3297 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3298 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3299 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3300
3301 rdata->result = server->ops->map_error(buf, true);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003302 if (rdata->result != 0) {
3303 cifs_dbg(FYI, "%s: server returned error %d\n",
3304 __func__, rdata->result);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003305 /* normal error on read response */
3306 dequeue_mid(mid, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003307 return 0;
3308 }
3309
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003310 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07003311#ifdef CONFIG_CIFS_SMB_DIRECT
3312 use_rdma_mr = rdata->mr;
3313#endif
3314 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003315
3316 if (data_offset < server->vals->read_rsp_size) {
3317 /*
3318 * win2k8 sometimes sends an offset of 0 when the read
3319 * is beyond the EOF. Treat it as if the data starts just after
3320 * the header.
3321 */
3322 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
3323 __func__, data_offset);
3324 data_offset = server->vals->read_rsp_size;
3325 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
3326 /* data_offset is beyond the end of smallbuf */
3327 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
3328 __func__, data_offset);
3329 rdata->result = -EIO;
3330 dequeue_mid(mid, rdata->result);
3331 return 0;
3332 }
3333
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003334 pad_len = data_offset - server->vals->read_rsp_size;
3335
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003336 if (buf_len <= data_offset) {
3337 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003338 cur_page_idx = pad_len / PAGE_SIZE;
3339 cur_off = pad_len % PAGE_SIZE;
3340
3341 if (cur_page_idx != 0) {
3342 /* data offset is beyond the 1st page of response */
3343 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
3344 __func__, data_offset);
3345 rdata->result = -EIO;
3346 dequeue_mid(mid, rdata->result);
3347 return 0;
3348 }
3349
3350 if (data_len > page_data_size - pad_len) {
3351 /* data_len is corrupt -- discard frame */
3352 rdata->result = -EIO;
3353 dequeue_mid(mid, rdata->result);
3354 return 0;
3355 }
3356
3357 rdata->result = init_read_bvec(pages, npages, page_data_size,
3358 cur_off, &bvec);
3359 if (rdata->result != 0) {
3360 dequeue_mid(mid, rdata->result);
3361 return 0;
3362 }
3363
David Howellsaa563d72018-10-20 00:57:56 +01003364 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003365 } else if (buf_len >= data_offset + data_len) {
3366 /* read response payload is in buf */
3367 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
3368 iov.iov_base = buf + data_offset;
3369 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01003370 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003371 } else {
3372 /* read response payload cannot be in both buf and pages */
3373 WARN_ONCE(1, "buf can not contain only a part of read data");
3374 rdata->result = -EIO;
3375 dequeue_mid(mid, rdata->result);
3376 return 0;
3377 }
3378
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003379 length = rdata->copy_into_pages(server, rdata, &iter);
3380
3381 kfree(bvec);
3382
3383 if (length < 0)
3384 return length;
3385
3386 dequeue_mid(mid, false);
3387 return length;
3388}
3389
3390static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003391receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
3392{
3393 char *buf = server->smallbuf;
3394 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3395 unsigned int npages;
3396 struct page **pages;
3397 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003398 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003399 int rc;
3400 int i = 0;
3401
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003402 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003403 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
3404
3405 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
3406 if (rc < 0)
3407 return rc;
3408 server->total_read += rc;
3409
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003410 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11003411 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003412 npages = DIV_ROUND_UP(len, PAGE_SIZE);
3413
3414 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
3415 if (!pages) {
3416 rc = -ENOMEM;
3417 goto discard_data;
3418 }
3419
3420 for (; i < npages; i++) {
3421 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3422 if (!pages[i]) {
3423 rc = -ENOMEM;
3424 goto discard_data;
3425 }
3426 }
3427
3428 /* read read data into pages */
3429 rc = read_data_into_pages(server, pages, npages, len);
3430 if (rc)
3431 goto free_pages;
3432
Pavel Shilovsky350be252017-04-10 10:31:33 -07003433 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003434 if (rc)
3435 goto free_pages;
3436
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003437 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003438 pages, npages, len);
3439 if (rc)
3440 goto free_pages;
3441
3442 *mid = smb2_find_mid(server, buf);
3443 if (*mid == NULL)
3444 cifs_dbg(FYI, "mid not found\n");
3445 else {
3446 cifs_dbg(FYI, "mid found\n");
3447 (*mid)->decrypted = true;
3448 rc = handle_read_data(server, *mid, buf,
3449 server->vals->read_rsp_size,
3450 pages, npages, len);
3451 }
3452
3453free_pages:
3454 for (i = i - 1; i >= 0; i--)
3455 put_page(pages[i]);
3456 kfree(pages);
3457 return rc;
3458discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07003459 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003460 goto free_pages;
3461}
3462
3463static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003464receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003465 struct mid_q_entry **mids, char **bufs,
3466 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003467{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003468 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003469 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003470 char *tmpbuf;
3471 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003472 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003473 unsigned int buf_size;
3474 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003475 int next_is_large;
3476 char *next_buffer = NULL;
3477
3478 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003479
3480 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003481 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003482 server->large_buf = true;
3483 memcpy(server->bigbuf, buf, server->total_read);
3484 buf = server->bigbuf;
3485 }
3486
3487 /* now read the rest */
3488 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003489 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003490 if (length < 0)
3491 return length;
3492 server->total_read += length;
3493
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003494 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003495 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
3496 if (length)
3497 return length;
3498
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003499 next_is_large = server->large_buf;
3500 one_more:
3501 shdr = (struct smb2_sync_hdr *)buf;
3502 if (shdr->NextCommand) {
3503 if (next_is_large) {
3504 tmpbuf = server->bigbuf;
3505 next_buffer = (char *)cifs_buf_get();
3506 } else {
3507 tmpbuf = server->smallbuf;
3508 next_buffer = (char *)cifs_small_buf_get();
3509 }
3510 memcpy(next_buffer,
3511 tmpbuf + le32_to_cpu(shdr->NextCommand),
3512 pdu_length - le32_to_cpu(shdr->NextCommand));
3513 }
3514
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003515 mid_entry = smb2_find_mid(server, buf);
3516 if (mid_entry == NULL)
3517 cifs_dbg(FYI, "mid not found\n");
3518 else {
3519 cifs_dbg(FYI, "mid found\n");
3520 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003521 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003522 }
3523
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003524 if (*num_mids >= MAX_COMPOUND) {
3525 cifs_dbg(VFS, "too many PDUs in compound\n");
3526 return -1;
3527 }
3528 bufs[*num_mids] = buf;
3529 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003530
3531 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003532 ret = mid_entry->handle(server, mid_entry);
3533 else
3534 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003535
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003536 if (ret == 0 && shdr->NextCommand) {
3537 pdu_length -= le32_to_cpu(shdr->NextCommand);
3538 server->large_buf = next_is_large;
3539 if (next_is_large)
3540 server->bigbuf = next_buffer;
3541 else
3542 server->smallbuf = next_buffer;
3543
3544 buf += le32_to_cpu(shdr->NextCommand);
3545 goto one_more;
3546 }
3547
3548 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003549}
3550
3551static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003552smb3_receive_transform(struct TCP_Server_Info *server,
3553 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003554{
3555 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003556 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003557 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3558 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3559
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003560 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003561 sizeof(struct smb2_sync_hdr)) {
3562 cifs_dbg(VFS, "Transform message is too small (%u)\n",
3563 pdu_length);
3564 cifs_reconnect(server);
3565 wake_up(&server->response_q);
3566 return -ECONNABORTED;
3567 }
3568
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003569 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003570 cifs_dbg(VFS, "Transform message is broken\n");
3571 cifs_reconnect(server);
3572 wake_up(&server->response_q);
3573 return -ECONNABORTED;
3574 }
3575
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003576 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08003577 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
3578 *num_mids = 1;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003579 return receive_encrypted_read(server, &mids[0]);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08003580 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003581
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003582 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003583}
3584
3585int
3586smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
3587{
3588 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
3589
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003590 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003591 NULL, 0, 0);
3592}
3593
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003594static int
3595smb2_next_header(char *buf)
3596{
3597 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
3598 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
3599
3600 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
3601 return sizeof(struct smb2_transform_hdr) +
3602 le32_to_cpu(t_hdr->OriginalMessageSize);
3603
3604 return le32_to_cpu(hdr->NextCommand);
3605}
3606
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003607struct smb_version_operations smb20_operations = {
3608 .compare_fids = smb2_compare_fids,
3609 .setup_request = smb2_setup_request,
3610 .setup_async_request = smb2_setup_async_request,
3611 .check_receive = smb2_check_receive,
3612 .add_credits = smb2_add_credits,
3613 .set_credits = smb2_set_credits,
3614 .get_credits_field = smb2_get_credits_field,
3615 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003616 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003617 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08003618 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003619 .read_data_offset = smb2_read_data_offset,
3620 .read_data_length = smb2_read_data_length,
3621 .map_error = map_smb2_to_linux_error,
3622 .find_mid = smb2_find_mid,
3623 .check_message = smb2_check_message,
3624 .dump_detail = smb2_dump_detail,
3625 .clear_stats = smb2_clear_stats,
3626 .print_stats = smb2_print_stats,
3627 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003628 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003629 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003630 .need_neg = smb2_need_neg,
3631 .negotiate = smb2_negotiate,
3632 .negotiate_wsize = smb2_negotiate_wsize,
3633 .negotiate_rsize = smb2_negotiate_rsize,
3634 .sess_setup = SMB2_sess_setup,
3635 .logoff = SMB2_logoff,
3636 .tree_connect = SMB2_tcon,
3637 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05003638 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003639 .is_path_accessible = smb2_is_path_accessible,
3640 .can_echo = smb2_can_echo,
3641 .echo = SMB2_echo,
3642 .query_path_info = smb2_query_path_info,
3643 .get_srv_inum = smb2_get_srv_inum,
3644 .query_file_info = smb2_query_file_info,
3645 .set_path_size = smb2_set_path_size,
3646 .set_file_size = smb2_set_file_size,
3647 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05003648 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003649 .mkdir = smb2_mkdir,
3650 .mkdir_setinfo = smb2_mkdir_setinfo,
3651 .rmdir = smb2_rmdir,
3652 .unlink = smb2_unlink,
3653 .rename = smb2_rename_path,
3654 .create_hardlink = smb2_create_hardlink,
3655 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01003656 .query_mf_symlink = smb3_query_mf_symlink,
3657 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003658 .open = smb2_open_file,
3659 .set_fid = smb2_set_fid,
3660 .close = smb2_close_file,
3661 .flush = smb2_flush_file,
3662 .async_readv = smb2_async_readv,
3663 .async_writev = smb2_async_writev,
3664 .sync_read = smb2_sync_read,
3665 .sync_write = smb2_sync_write,
3666 .query_dir_first = smb2_query_dir_first,
3667 .query_dir_next = smb2_query_dir_next,
3668 .close_dir = smb2_close_dir,
3669 .calc_smb_size = smb2_calc_size,
3670 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003671 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003672 .oplock_response = smb2_oplock_response,
3673 .queryfs = smb2_queryfs,
3674 .mand_lock = smb2_mand_lock,
3675 .mand_unlock_range = smb2_unlock_range,
3676 .push_mand_locks = smb2_push_mandatory_locks,
3677 .get_lease_key = smb2_get_lease_key,
3678 .set_lease_key = smb2_set_lease_key,
3679 .new_lease_key = smb2_new_lease_key,
3680 .calc_signature = smb2_calc_signature,
3681 .is_read_op = smb2_is_read_op,
3682 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003683 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003684 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003685 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003686 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04003687 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01003688 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05303689 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003690#ifdef CONFIG_CIFS_XATTR
3691 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10003692 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003693#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003694#ifdef CONFIG_CIFS_ACL
3695 .get_acl = get_smb2_acl,
3696 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003697 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003698#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003699 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003700 .ioctl_query_info = smb2_ioctl_query_info,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003701};
3702
Steve French1080ef72011-02-24 18:07:19 +00003703struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07003704 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04003705 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04003706 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04003707 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04003708 .add_credits = smb2_add_credits,
3709 .set_credits = smb2_set_credits,
3710 .get_credits_field = smb2_get_credits_field,
3711 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003712 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003713 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04003714 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08003715 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003716 .read_data_offset = smb2_read_data_offset,
3717 .read_data_length = smb2_read_data_length,
3718 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04003719 .find_mid = smb2_find_mid,
3720 .check_message = smb2_check_message,
3721 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04003722 .clear_stats = smb2_clear_stats,
3723 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07003724 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003725 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003726 .downgrade_oplock = smb21_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04003727 .need_neg = smb2_need_neg,
3728 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07003729 .negotiate_wsize = smb2_negotiate_wsize,
3730 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04003731 .sess_setup = SMB2_sess_setup,
3732 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04003733 .tree_connect = SMB2_tcon,
3734 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05003735 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003736 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003737 .can_echo = smb2_can_echo,
3738 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003739 .query_path_info = smb2_query_path_info,
3740 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07003741 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07003742 .set_path_size = smb2_set_path_size,
3743 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07003744 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05003745 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04003746 .mkdir = smb2_mkdir,
3747 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04003748 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07003749 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07003750 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07003751 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003752 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05003753 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05003754 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003755 .open = smb2_open_file,
3756 .set_fid = smb2_set_fid,
3757 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003758 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003759 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07003760 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07003761 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07003762 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07003763 .query_dir_first = smb2_query_dir_first,
3764 .query_dir_next = smb2_query_dir_next,
3765 .close_dir = smb2_close_dir,
3766 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07003767 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003768 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07003769 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07003770 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07003771 .mand_lock = smb2_mand_lock,
3772 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07003773 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07003774 .get_lease_key = smb2_get_lease_key,
3775 .set_lease_key = smb2_set_lease_key,
3776 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06003777 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003778 .is_read_op = smb21_is_read_op,
3779 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003780 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003781 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003782 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003783 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04003784 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05003785 .enum_snapshots = smb3_enum_snapshots,
Aurelien Aptel9d496402017-02-13 16:16:49 +01003786 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05303787 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003788#ifdef CONFIG_CIFS_XATTR
3789 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10003790 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003791#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003792#ifdef CONFIG_CIFS_ACL
3793 .get_acl = get_smb2_acl,
3794 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003795 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003796#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003797 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003798 .ioctl_query_info = smb2_ioctl_query_info,
Steve French38107d42012-12-08 22:08:06 -06003799};
3800
Steve French38107d42012-12-08 22:08:06 -06003801struct smb_version_operations smb30_operations = {
3802 .compare_fids = smb2_compare_fids,
3803 .setup_request = smb2_setup_request,
3804 .setup_async_request = smb2_setup_async_request,
3805 .check_receive = smb2_check_receive,
3806 .add_credits = smb2_add_credits,
3807 .set_credits = smb2_set_credits,
3808 .get_credits_field = smb2_get_credits_field,
3809 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003810 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003811 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06003812 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08003813 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06003814 .read_data_offset = smb2_read_data_offset,
3815 .read_data_length = smb2_read_data_length,
3816 .map_error = map_smb2_to_linux_error,
3817 .find_mid = smb2_find_mid,
3818 .check_message = smb2_check_message,
3819 .dump_detail = smb2_dump_detail,
3820 .clear_stats = smb2_clear_stats,
3821 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05003822 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06003823 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003824 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003825 .downgrade_oplock = smb21_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06003826 .need_neg = smb2_need_neg,
3827 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05003828 .negotiate_wsize = smb3_negotiate_wsize,
3829 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06003830 .sess_setup = SMB2_sess_setup,
3831 .logoff = SMB2_logoff,
3832 .tree_connect = SMB2_tcon,
3833 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05003834 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06003835 .is_path_accessible = smb2_is_path_accessible,
3836 .can_echo = smb2_can_echo,
3837 .echo = SMB2_echo,
3838 .query_path_info = smb2_query_path_info,
3839 .get_srv_inum = smb2_get_srv_inum,
3840 .query_file_info = smb2_query_file_info,
3841 .set_path_size = smb2_set_path_size,
3842 .set_file_size = smb2_set_file_size,
3843 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05003844 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06003845 .mkdir = smb2_mkdir,
3846 .mkdir_setinfo = smb2_mkdir_setinfo,
3847 .rmdir = smb2_rmdir,
3848 .unlink = smb2_unlink,
3849 .rename = smb2_rename_path,
3850 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003851 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05003852 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05003853 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06003854 .open = smb2_open_file,
3855 .set_fid = smb2_set_fid,
3856 .close = smb2_close_file,
3857 .flush = smb2_flush_file,
3858 .async_readv = smb2_async_readv,
3859 .async_writev = smb2_async_writev,
3860 .sync_read = smb2_sync_read,
3861 .sync_write = smb2_sync_write,
3862 .query_dir_first = smb2_query_dir_first,
3863 .query_dir_next = smb2_query_dir_next,
3864 .close_dir = smb2_close_dir,
3865 .calc_smb_size = smb2_calc_size,
3866 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003867 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06003868 .oplock_response = smb2_oplock_response,
3869 .queryfs = smb2_queryfs,
3870 .mand_lock = smb2_mand_lock,
3871 .mand_unlock_range = smb2_unlock_range,
3872 .push_mand_locks = smb2_push_mandatory_locks,
3873 .get_lease_key = smb2_get_lease_key,
3874 .set_lease_key = smb2_set_lease_key,
3875 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06003876 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06003877 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05003878 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003879 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003880 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003881 .create_lease_buf = smb3_create_lease_buf,
3882 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003883 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05003884 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06003885 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003886 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04003887 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05003888 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05003889 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003890 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003891 .is_transform_hdr = smb3_is_transform_hdr,
3892 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01003893 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05303894 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003895#ifdef CONFIG_CIFS_XATTR
3896 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10003897 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003898#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003899#ifdef CONFIG_CIFS_ACL
3900 .get_acl = get_smb2_acl,
3901 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003902 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003903#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003904 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003905 .ioctl_query_info = smb2_ioctl_query_info,
Steve French1080ef72011-02-24 18:07:19 +00003906};
3907
Steve Frenchaab18932015-06-23 23:37:11 -05003908struct smb_version_operations smb311_operations = {
3909 .compare_fids = smb2_compare_fids,
3910 .setup_request = smb2_setup_request,
3911 .setup_async_request = smb2_setup_async_request,
3912 .check_receive = smb2_check_receive,
3913 .add_credits = smb2_add_credits,
3914 .set_credits = smb2_set_credits,
3915 .get_credits_field = smb2_get_credits_field,
3916 .get_credits = smb2_get_credits,
3917 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003918 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05003919 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08003920 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05003921 .read_data_offset = smb2_read_data_offset,
3922 .read_data_length = smb2_read_data_length,
3923 .map_error = map_smb2_to_linux_error,
3924 .find_mid = smb2_find_mid,
3925 .check_message = smb2_check_message,
3926 .dump_detail = smb2_dump_detail,
3927 .clear_stats = smb2_clear_stats,
3928 .print_stats = smb2_print_stats,
3929 .dump_share_caps = smb2_dump_share_caps,
3930 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003931 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003932 .downgrade_oplock = smb21_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05003933 .need_neg = smb2_need_neg,
3934 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05003935 .negotiate_wsize = smb3_negotiate_wsize,
3936 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05003937 .sess_setup = SMB2_sess_setup,
3938 .logoff = SMB2_logoff,
3939 .tree_connect = SMB2_tcon,
3940 .tree_disconnect = SMB2_tdis,
3941 .qfs_tcon = smb3_qfs_tcon,
3942 .is_path_accessible = smb2_is_path_accessible,
3943 .can_echo = smb2_can_echo,
3944 .echo = SMB2_echo,
3945 .query_path_info = smb2_query_path_info,
3946 .get_srv_inum = smb2_get_srv_inum,
3947 .query_file_info = smb2_query_file_info,
3948 .set_path_size = smb2_set_path_size,
3949 .set_file_size = smb2_set_file_size,
3950 .set_file_info = smb2_set_file_info,
3951 .set_compression = smb2_set_compression,
3952 .mkdir = smb2_mkdir,
3953 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05003954 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05003955 .rmdir = smb2_rmdir,
3956 .unlink = smb2_unlink,
3957 .rename = smb2_rename_path,
3958 .create_hardlink = smb2_create_hardlink,
3959 .query_symlink = smb2_query_symlink,
3960 .query_mf_symlink = smb3_query_mf_symlink,
3961 .create_mf_symlink = smb3_create_mf_symlink,
3962 .open = smb2_open_file,
3963 .set_fid = smb2_set_fid,
3964 .close = smb2_close_file,
3965 .flush = smb2_flush_file,
3966 .async_readv = smb2_async_readv,
3967 .async_writev = smb2_async_writev,
3968 .sync_read = smb2_sync_read,
3969 .sync_write = smb2_sync_write,
3970 .query_dir_first = smb2_query_dir_first,
3971 .query_dir_next = smb2_query_dir_next,
3972 .close_dir = smb2_close_dir,
3973 .calc_smb_size = smb2_calc_size,
3974 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003975 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05003976 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05003977 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05003978 .mand_lock = smb2_mand_lock,
3979 .mand_unlock_range = smb2_unlock_range,
3980 .push_mand_locks = smb2_push_mandatory_locks,
3981 .get_lease_key = smb2_get_lease_key,
3982 .set_lease_key = smb2_set_lease_key,
3983 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06003984 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05003985 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05003986 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05003987 .is_read_op = smb21_is_read_op,
3988 .set_oplock_level = smb3_set_oplock_level,
3989 .create_lease_buf = smb3_create_lease_buf,
3990 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003991 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07003992 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05003993/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
3994 .wp_retry_size = smb2_wp_retry_size,
3995 .dir_needs_close = smb2_dir_needs_close,
3996 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05003997 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003998 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003999 .is_transform_hdr = smb3_is_transform_hdr,
4000 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004001 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304002 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004003#ifdef CONFIG_CIFS_XATTR
4004 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004005 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004006#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10004007#ifdef CONFIG_CIFS_ACL
4008 .get_acl = get_smb2_acl,
4009 .get_acl_by_fid = get_smb2_acl_by_fid,
4010 .set_acl = set_smb2_acl,
4011#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004012 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004013 .ioctl_query_info = smb2_ioctl_query_info,
Steve Frenchaab18932015-06-23 23:37:11 -05004014};
Steve Frenchaab18932015-06-23 23:37:11 -05004015
Steve Frenchdd446b12012-11-28 23:21:06 -06004016struct smb_version_values smb20_values = {
4017 .version_string = SMB20_VERSION_STRING,
4018 .protocol_id = SMB20_PROT_ID,
4019 .req_capabilities = 0, /* MBZ */
4020 .large_lock_type = 0,
4021 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4022 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4023 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004024 .header_size = sizeof(struct smb2_sync_hdr),
4025 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06004026 .max_header_size = MAX_SMB2_HDR_SIZE,
4027 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4028 .lock_cmd = SMB2_LOCK,
4029 .cap_unix = 0,
4030 .cap_nt_find = SMB2_NT_FIND,
4031 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004032 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4033 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004034 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06004035};
4036
Steve French1080ef72011-02-24 18:07:19 +00004037struct smb_version_values smb21_values = {
4038 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004039 .protocol_id = SMB21_PROT_ID,
4040 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
4041 .large_lock_type = 0,
4042 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4043 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4044 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004045 .header_size = sizeof(struct smb2_sync_hdr),
4046 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004047 .max_header_size = MAX_SMB2_HDR_SIZE,
4048 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4049 .lock_cmd = SMB2_LOCK,
4050 .cap_unix = 0,
4051 .cap_nt_find = SMB2_NT_FIND,
4052 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004053 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4054 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004055 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05004056};
4057
Steve French9764c022017-09-17 10:41:35 -05004058struct smb_version_values smb3any_values = {
4059 .version_string = SMB3ANY_VERSION_STRING,
4060 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004061 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004062 .large_lock_type = 0,
4063 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4064 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4065 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004066 .header_size = sizeof(struct smb2_sync_hdr),
4067 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004068 .max_header_size = MAX_SMB2_HDR_SIZE,
4069 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4070 .lock_cmd = SMB2_LOCK,
4071 .cap_unix = 0,
4072 .cap_nt_find = SMB2_NT_FIND,
4073 .cap_large_files = SMB2_LARGE_FILES,
4074 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4075 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4076 .create_lease_size = sizeof(struct create_lease_v2),
4077};
4078
4079struct smb_version_values smbdefault_values = {
4080 .version_string = SMBDEFAULT_VERSION_STRING,
4081 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004082 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004083 .large_lock_type = 0,
4084 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4085 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4086 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004087 .header_size = sizeof(struct smb2_sync_hdr),
4088 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004089 .max_header_size = MAX_SMB2_HDR_SIZE,
4090 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4091 .lock_cmd = SMB2_LOCK,
4092 .cap_unix = 0,
4093 .cap_nt_find = SMB2_NT_FIND,
4094 .cap_large_files = SMB2_LARGE_FILES,
4095 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4096 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4097 .create_lease_size = sizeof(struct create_lease_v2),
4098};
4099
Steve Frenche4aa25e2012-10-01 12:26:22 -05004100struct smb_version_values smb30_values = {
4101 .version_string = SMB30_VERSION_STRING,
4102 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004103 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004104 .large_lock_type = 0,
4105 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4106 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4107 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004108 .header_size = sizeof(struct smb2_sync_hdr),
4109 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004110 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004111 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004112 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004113 .cap_unix = 0,
4114 .cap_nt_find = SMB2_NT_FIND,
4115 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004116 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4117 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004118 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00004119};
Steve French20b6d8b2013-06-12 22:48:41 -05004120
4121struct smb_version_values smb302_values = {
4122 .version_string = SMB302_VERSION_STRING,
4123 .protocol_id = SMB302_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004124 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French20b6d8b2013-06-12 22:48:41 -05004125 .large_lock_type = 0,
4126 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4127 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4128 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004129 .header_size = sizeof(struct smb2_sync_hdr),
4130 .header_preamble_size = 0,
Steve French20b6d8b2013-06-12 22:48:41 -05004131 .max_header_size = MAX_SMB2_HDR_SIZE,
4132 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4133 .lock_cmd = SMB2_LOCK,
4134 .cap_unix = 0,
4135 .cap_nt_find = SMB2_NT_FIND,
4136 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004137 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4138 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004139 .create_lease_size = sizeof(struct create_lease_v2),
Steve French20b6d8b2013-06-12 22:48:41 -05004140};
Steve French5f7fbf72014-12-17 22:52:58 -06004141
Steve French5f7fbf72014-12-17 22:52:58 -06004142struct smb_version_values smb311_values = {
4143 .version_string = SMB311_VERSION_STRING,
4144 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004145 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06004146 .large_lock_type = 0,
4147 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4148 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4149 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004150 .header_size = sizeof(struct smb2_sync_hdr),
4151 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06004152 .max_header_size = MAX_SMB2_HDR_SIZE,
4153 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4154 .lock_cmd = SMB2_LOCK,
4155 .cap_unix = 0,
4156 .cap_nt_find = SMB2_NT_FIND,
4157 .cap_large_files = SMB2_LARGE_FILES,
4158 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4159 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4160 .create_lease_size = sizeof(struct create_lease_v2),
4161};