/*
 * SMB2 version specific operations
 *
 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2 as published
 * by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pagemap.h>
#include <linux/vfs.h>
#include <linux/falloc.h>
#include <linux/scatterlist.h>
#include <linux/uuid.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
#include "smbdirect.h"
static int
change_conf(struct TCP_Server_Info *server)
{
	server->credits += server->echo_credits + server->oplock_credits;
	server->oplock_credits = server->echo_credits = 0;
	switch (server->credits) {
	case 0:
		return -1;
	case 1:
		server->echoes = false;
		server->oplocks = false;
		cifs_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		server->echoes = true;
		server->oplocks = false;
		server->echo_credits = 1;
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		server->echoes = true;
		if (enable_oplocks) {
			server->oplocks = true;
			server->oplock_credits = 1;
		} else
			server->oplocks = false;

		server->echo_credits = 1;
	}
	server->credits -= server->echo_credits + server->oplock_credits;
	return 0;
}

static void
smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
		 const int optype)
{
	int *val, rc = 0;
	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);

	*val += add;
	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	if (rc)
		cifs_reconnect(server);
}

static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
	spin_lock(&server->req_lock);
	server->credits = val;
	if (val == 1)
		server->reconnect_instance++;
	spin_unlock(&server->req_lock);
	/* don't log while holding the lock */
	if (val == 1)
		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}

static int *
smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
	switch (optype) {
	case CIFS_ECHO_OP:
		return &server->echo_credits;
	case CIFS_OBREAK_OP:
		return &server->oplock_credits;
	default:
		return &server->credits;
	}
}

static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;

	return le16_to_cpu(shdr->CreditRequest);
}

static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits == 1) {
				*num = SMB2_MAX_BUFFER_SIZE;
				*credits = 0;
				break;
			}

			/* leave one credit for a possible reopen */
			scredits--;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			*credits = DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			server->credits -= *credits;
			server->in_flight++;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}

static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
	__u64 mid;
	/* for SMB2 we need the current value */
	spin_lock(&GlobalMid_Lock);
	mid = server->CurrentMid++;
	spin_unlock(&GlobalMid_Lock);
	return mid;
}

static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_dbg(VFS, "encrypted frame parsing not supported yet");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}

static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}

static bool
smb2_need_neg(struct TCP_Server_Info *server)
{
	return server->max_read == 0;
}

static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;
	ses->server->CurrentMid = 0;
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}

static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}

static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}

static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}

static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}

static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
			size_t buf_len,
			struct cifs_server_iface **iface_list,
			size_t *iface_count)
{
	struct network_interface_info_ioctl_rsp *p;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	struct iface_info_ipv4 *p4;
	struct iface_info_ipv6 *p6;
	struct cifs_server_iface *info;
	ssize_t bytes_left;
	size_t next = 0;
	int nb_iface = 0;
	int rc = 0;

	*iface_list = NULL;
	*iface_count = 0;

	/*
	 * First pass: count and sanity check
	 */

	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		nb_iface++;
		next = le32_to_cpu(p->Next);
		if (!next) {
			bytes_left -= sizeof(*p);
			break;
		}
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!nb_iface) {
		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	if (bytes_left || p->Next)
		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);


	/*
	 * Second pass: extract info to internal structure
	 */

	*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
	if (!*iface_list) {
		rc = -ENOMEM;
		goto out;
	}

	info = *iface_list;
	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		info->speed = le64_to_cpu(p->LinkSpeed);
		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);

		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
			 le32_to_cpu(p->Capability));

		switch (p->Family) {
		/*
		 * The kernel and wire socket structures have the same
		 * layout and use network byte order but make the
		 * conversion explicit in case either one changes.
		 */
		case INTERNETWORK:
			addr4 = (struct sockaddr_in *)&info->sockaddr;
			p4 = (struct iface_info_ipv4 *)p->Buffer;
			addr4->sin_family = AF_INET;
			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
			addr4->sin_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
				 &addr4->sin_addr);
			break;
		case INTERNETWORKV6:
			addr6 = (struct sockaddr_in6 *)&info->sockaddr;
			p6 = (struct iface_info_ipv6 *)p->Buffer;
			addr6->sin6_family = AF_INET6;
			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
			addr6->sin6_flowinfo = 0;
			addr6->sin6_scope_id = 0;
			addr6->sin6_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
				 &addr6->sin6_addr);
			break;
		default:
			cifs_dbg(VFS,
				 "%s: skipping unsupported socket family\n",
				 __func__);
			goto next_iface;
		}

		(*iface_count)++;
		info++;
next_iface:
		next = le32_to_cpu(p->Next);
		if (!next)
			break;
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!*iface_count) {
		rc = -EINVAL;
		goto out;
	}

out:
	if (rc) {
		kfree(*iface_list);
		*iface_count = 0;
		*iface_list = NULL;
	}
	return rc;
}


static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			(char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
	}
}

void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}

void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
			struct cached_fid, lease_break);

	close_shroot(cfid);
}
/*
 * Open the directory at the root of a share
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
	struct cifs_open_parms oparams;
	int rc;
	__le16 srch_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	oparams.tcon = tcon;
	oparams.create_options = 0;
	oparams.desired_access = FILE_READ_ATTRIBUTES;
	oparams.disposition = FILE_OPEN;
	oparams.fid = pfid;
	oparams.reconnect = false;

	rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
	if (rc == 0) {
		memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
		tcon->crfid.tcon = tcon;
		tcon->crfid.is_valid = true;
		kref_init(&tcon->crfid.refcount);
		kref_get(&tcon->crfid.refcount);
	}
	mutex_unlock(&tcon->crfid.fid_mutex);
	return rc;
}

static void
smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	bool no_cached_open = tcon->nohandlecache;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (no_cached_open)
		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
			       NULL);
	else
		rc = open_shroot(xid, tcon, &fid);

	if (rc)
		return;

	SMB3_request_interfaces(xid, tcon);

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_VOLUME_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
	if (no_cached_open)
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	else
		close_shroot(&tcon->crfid);

	return;
}

static void
smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
	if (rc)
		return;

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	return;
}

static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, const char *full_path)
{
	int rc;
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	if ((*full_path == 0) && tcon->crfid.is_valid)
		return 0;

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	if (rc) {
		kfree(utf16_path);
		return rc;
	}

	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	kfree(utf16_path);
	return rc;
}

static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}

static int
smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
		     struct cifs_fid *fid, FILE_ALL_INFO *data)
{
	int rc;
	struct smb2_file_all_info *smb2_data;

	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			    GFP_KERNEL);
	if (smb2_data == NULL)
		return -ENOMEM;

	rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
			     smb2_data);
	if (!rc)
		move_smb2_info_to_cifs(data, smb2_data);
	kfree(smb2_data);
	return rc;
}
#ifdef CONFIG_CIFS_XATTR
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0) {
			break;
		}

		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}

static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      SMB2_MAX_EA_BUF,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
			le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}


static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *path, const char *ea_name, const void *ea_value,
	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
	    struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	__le16 *utf16_path = NULL;
	int ea_name_len = strlen(ea_name);
	int flags = 0;
	int len;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	unsigned int size[1];
	void *data[1];
	struct smb2_file_full_ea_info *ea = NULL;
	struct kvec close_iov[1];
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (ea_name_len > 255)
		return -EINVAL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_WRITE_EA;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(ses->server, &rqst[0], 0);


	/* Set Info */
	memset(&si_iov, 0, sizeof(si_iov));
	rqst[1].rq_iov = si_iov;
	rqst[1].rq_nvec = 1;

	len = sizeof(ea) + ea_name_len + ea_value_len + 1;
	ea = kzalloc(len, GFP_KERNEL);
	if (ea == NULL) {
		rc = -ENOMEM;
		goto sea_exit;
	}

	ea->ea_name_length = ea_name_len;
	ea->ea_value_length = cpu_to_le16(ea_value_len);
	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

	size[0] = len;
	data[0] = ea;

	rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
				COMPOUND_FID, current->tgid,
				FILE_FULL_EA_INFORMATION,
				SMB2_O_INFO_FILE, 0, data, size);
	smb2_set_next_command(server, &rqst[1], 0);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;
	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);

 sea_exit:
	kfree(ea);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_set_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
#endif

static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}

static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
	int i;
	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
	}
}

static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}

static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;

	/*
	 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
	 * totals (requests sent) since those SMBs are per-session not per tcon
	 */
	seq_printf(m, "\nBytes read: %llu  Bytes written: %llu",
		   (long long)(tcon->bytes_read),
		   (long long)(tcon->bytes_written));
	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
		   atomic_read(&tcon->num_local_opens),
		   atomic_read(&tcon->num_remote_opens));
	seq_printf(m, "\nTreeConnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
	seq_printf(m, "\nCreates: %d total %d failed",
		   atomic_read(&sent[SMB2_CREATE_HE]),
		   atomic_read(&failed[SMB2_CREATE_HE]));
	seq_printf(m, "\nCloses: %d total %d failed",
		   atomic_read(&sent[SMB2_CLOSE_HE]),
		   atomic_read(&failed[SMB2_CLOSE_HE]));
	seq_printf(m, "\nFlushes: %d total %d failed",
		   atomic_read(&sent[SMB2_FLUSH_HE]),
		   atomic_read(&failed[SMB2_FLUSH_HE]));
	seq_printf(m, "\nReads: %d total %d failed",
		   atomic_read(&sent[SMB2_READ_HE]),
		   atomic_read(&failed[SMB2_READ_HE]));
	seq_printf(m, "\nWrites: %d total %d failed",
		   atomic_read(&sent[SMB2_WRITE_HE]),
		   atomic_read(&failed[SMB2_WRITE_HE]));
	seq_printf(m, "\nLocks: %d total %d failed",
		   atomic_read(&sent[SMB2_LOCK_HE]),
		   atomic_read(&failed[SMB2_LOCK_HE]));
	seq_printf(m, "\nIOCTLs: %d total %d failed",
		   atomic_read(&sent[SMB2_IOCTL_HE]),
		   atomic_read(&failed[SMB2_IOCTL_HE]));
	seq_printf(m, "\nQueryDirectories: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
	seq_printf(m, "\nChangeNotifies: %d total %d failed",
		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
	seq_printf(m, "\nQueryInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
	seq_printf(m, "\nSetInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_SET_INFO_HE]),
		   atomic_read(&failed[SMB2_SET_INFO_HE]));
	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
}
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}

static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}

static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}
static int
smb2_ioctl_query_info(const unsigned int xid,
		      struct cifs_tcon *tcon,
		      __le16 *path, int is_dir,
		      unsigned long p)
{
	struct cifs_ses *ses = tcon->ses;
	char __user *arg = (char __user *)p;
	struct smb_query_info qi;
	struct smb_query_info __user *pqi;
	int rc = 0;
	int flags = 0;
	struct smb2_query_info_rsp *rsp = NULL;
	void *buffer = NULL;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec qi_iov[1];
	struct kvec close_iov[1];

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
		return -EFAULT;

	if (qi.output_buffer_length > 1024)
		return -EINVAL;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
			   qi.output_buffer_length)) {
		rc = -EFAULT;
		goto iqinf_exit;
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
	oparms.disposition = FILE_OPEN;
	if (is_dir)
		oparms.create_options = CREATE_NOT_FILE;
	else
		oparms.create_options = CREATE_NOT_DIR;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(ses->server, &rqst[0], 0);

	/* Query */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  qi.file_info_class, qi.info_type,
				  qi.additional_information,
				  qi.input_buffer_length,
				  qi.output_buffer_length, buffer);
	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(ses->server, &rqst[1], 0);
	smb2_set_related(&rqst[1]);

	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto iqinf_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc)
		goto iqinf_exit;
	pqi = (struct smb_query_info __user *)arg;
	rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(rsp->OutputBufferLength) < qi.input_buffer_length)
		qi.input_buffer_length = le32_to_cpu(rsp->OutputBufferLength);
	if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
			 sizeof(qi.input_buffer_length))) {
		rc = -EFAULT;
		goto iqinf_exit;
	}
	if (copy_to_user(pqi + 1, rsp->Buffer, qi.input_buffer_length)) {
		rc = -EFAULT;
		goto iqinf_exit;
	}

 iqinf_exit:
	kfree(buffer);
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
static ssize_t
smb2_copychunk_range(const unsigned int xid,
			struct cifsFileInfo *srcfile,
			struct cifsFileInfo *trgtfile, u64 src_off,
			u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct copychunk_ioctl *pcchunk;
	struct copychunk_ioctl_rsp *retbuf = NULL;
	struct cifs_tcon *tcon;
	int chunks_copied = 0;
	bool chunk_sizes_updated = false;
	ssize_t bytes_written, total_bytes_written = 0;

	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);

	if (pcchunk == NULL)
		return -ENOMEM;

	cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
	/* Request a key from the server to identify the source of the copy */
	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
				  srcfile->fid.persistent_fid,
				  srcfile->fid.volatile_fid, pcchunk);

	/* Note: request_res_key sets res_key null only if rc !=0 */
	if (rc)
		goto cchunk_out;

	/* For now array only one chunk long, will make more flexible later */
	pcchunk->ChunkCount = cpu_to_le32(1);
	pcchunk->Reserved = 0;
	pcchunk->Reserved2 = 0;

	tcon = tlink_tcon(trgtfile->tlink);

	while (len > 0) {
		pcchunk->SourceOffset = cpu_to_le64(src_off);
		pcchunk->TargetOffset = cpu_to_le64(dest_off);
		pcchunk->Length =
			cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));

		/* Request server copy to target from src identified by key */
		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
			true /* is_fsctl */, (char *)pcchunk,
			sizeof(struct copychunk_ioctl), (char **)&retbuf,
			&ret_data_len);
		if (rc == 0) {
			if (ret_data_len !=
					sizeof(struct copychunk_ioctl_rsp)) {
				cifs_dbg(VFS, "invalid cchunk response size\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (retbuf->TotalBytesWritten == 0) {
				cifs_dbg(FYI, "no bytes copied\n");
				rc = -EIO;
				goto cchunk_out;
			}
			/*
			 * Check if server claimed to write more than we asked
			 */
			if (le32_to_cpu(retbuf->TotalBytesWritten) >
			    le32_to_cpu(pcchunk->Length)) {
				cifs_dbg(VFS, "invalid copy chunk response\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
				cifs_dbg(VFS, "invalid num chunks written\n");
				rc = -EIO;
				goto cchunk_out;
			}
			chunks_copied++;

			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
			src_off += bytes_written;
			dest_off += bytes_written;
			len -= bytes_written;
			total_bytes_written += bytes_written;

			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				bytes_written);
		} else if (rc == -EINVAL) {
			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
				goto cchunk_out;

			cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				le32_to_cpu(retbuf->TotalBytesWritten));

			/*
			 * Check if this is the first request using these sizes,
			 * (ie check if copy succeed once with original sizes
			 * and check if the server gave us different sizes after
			 * we already updated max sizes on previous request).
			 * if not then why is the server returning an error now
			 */
			if ((chunks_copied != 0) || chunk_sizes_updated)
				goto cchunk_out;

			/* Check that server is not asking us to grow size */
			if (le32_to_cpu(retbuf->ChunkBytesWritten) <
					tcon->max_bytes_chunk)
				tcon->max_bytes_chunk =
					le32_to_cpu(retbuf->ChunkBytesWritten);
			else
				goto cchunk_out; /* server gave us bogus size */

			/* No need to change MaxChunks since already set to 1 */
			chunk_sizes_updated = true;
		} else
			goto cchunk_out;
	}

cchunk_out:
	kfree(pcchunk);
	kfree(retbuf);
	if (rc)
		return rc;
	else
		return total_bytes_written;
}
1410static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001411smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1412 struct cifs_fid *fid)
1413{
1414 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1415}
1416
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001417static unsigned int
1418smb2_read_data_offset(char *buf)
1419{
1420 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
1421 return rsp->DataOffset;
1422}
1423
1424static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001425smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001426{
1427 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001428
1429 if (in_remaining)
1430 return le32_to_cpu(rsp->DataRemaining);
1431
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001432 return le32_to_cpu(rsp->DataLength);
1433}
1434
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001435
1436static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001437smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001438 struct cifs_io_parms *parms, unsigned int *bytes_read,
1439 char **buf, int *buf_type)
1440{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001441 parms->persistent_fid = pfid->persistent_fid;
1442 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001443 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1444}
1445
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001446static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001447smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001448 struct cifs_io_parms *parms, unsigned int *written,
1449 struct kvec *iov, unsigned long nr_segs)
1450{
1451
Steve Frenchdb8b6312014-09-22 05:13:55 -05001452 parms->persistent_fid = pfid->persistent_fid;
1453 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001454 return SMB2_write(xid, parms, written, iov, nr_segs);
1455}
1456
Steve Frenchd43cc792014-08-13 17:16:29 -05001457/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1458static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1459 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1460{
1461 struct cifsInodeInfo *cifsi;
1462 int rc;
1463
1464 cifsi = CIFS_I(inode);
1465
1466 /* if file already sparse don't bother setting sparse again */
1467 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1468 return true; /* already sparse */
1469
1470 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1471 return true; /* already not sparse */
1472
1473 /*
1474 * Can't check for sparse support on share the usual way via the
1475 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1476 * since Samba server doesn't set the flag on the share, yet
1477 * supports the set sparse FSCTL and returns sparse correctly
1478 * in the file attributes. If we fail setting sparse though we
1479 * mark that server does not support sparse files for this share
1480 * to avoid repeatedly sending the unsupported fsctl to server
1481 * if the file is repeatedly extended.
1482 */
1483 if (tcon->broken_sparse_sup)
1484 return false;
1485
1486 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1487 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001488			true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001489 &setsparse, 1, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001490 if (rc) {
1491 tcon->broken_sparse_sup = true;
1492 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1493 return false;
1494 }
1495
1496 if (setsparse)
1497 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1498 else
1499 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1500
1501 return true;
1502}
1503
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001504static int
1505smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1506 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1507{
1508 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001509 struct inode *inode;
1510
1511 /*
1512 * If extending file more than one page make sparse. Many Linux fs
1513 * make files sparse by default when extending via ftruncate
1514 */
David Howells2b0143b2015-03-17 22:25:59 +00001515 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001516
1517 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001518 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001519
Steve Frenchd43cc792014-08-13 17:16:29 -05001520 /* whether set sparse succeeds or not, extend the file */
1521 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001522 }
1523
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001524 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001525 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001526}
1527
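/*
 * Clone a byte range from srcfile into trgtfile using
 * FSCTL_DUPLICATE_EXTENTS_TO_FILE. Only attempted when the share
 * advertises block refcounting; the target is extended first so the
 * cloned range fits.
 */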
Steve French02b16662015-06-27 21:18:36 -07001528static int
1529smb2_duplicate_extents(const unsigned int xid,
1530 struct cifsFileInfo *srcfile,
1531 struct cifsFileInfo *trgtfile, u64 src_off,
1532 u64 len, u64 dest_off)
1533{
1534 int rc;
1535 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001536 struct duplicate_extents_to_file dup_ext_buf;
1537 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1538
	1539	 /* server filesystems advertise duplicate extent support with this flag */
1540 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1541 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1542 return -EOPNOTSUPP;
1543
1544 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1545 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1546 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1547 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1548 dup_ext_buf.ByteCount = cpu_to_le64(len);
1549 cifs_dbg(FYI, "duplicate extents: src off %lld dst off %lld len %lld",
1550 src_off, dest_off, len);
1551
1552 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1553 if (rc)
1554 goto duplicate_extents_out;
1555
1556 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1557 trgtfile->fid.volatile_fid,
1558 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001559 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001560 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001561 sizeof(struct duplicate_extents_to_file),
Steve French24df1482016-09-29 04:20:23 -05001562 NULL,
Steve French02b16662015-06-27 21:18:36 -07001563 &ret_data_len);
1564
1565 if (ret_data_len > 0)
1566 cifs_dbg(FYI, "non-zero response length in duplicate extents");
1567
1568duplicate_extents_out:
1569 return rc;
1570}
Steve French02b16662015-06-27 21:18:36 -07001571
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001572static int
Steve French64a5cfa2013-10-14 15:31:32 -05001573smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1574 struct cifsFileInfo *cfile)
1575{
1576 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1577 cfile->fid.volatile_fid);
1578}
1579
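/*
 * Ask the server for integrity (checksum) protection on the open file via
 * FSCTL_SET_INTEGRITY_INFORMATION, leaving the checksum algorithm unchanged.
 */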
1580static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001581smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1582 struct cifsFileInfo *cfile)
1583{
1584 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001585 unsigned int ret_data_len;
1586
1587 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1588 integr_info.Flags = 0;
1589 integr_info.Reserved = 0;
1590
1591 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1592 cfile->fid.volatile_fid,
1593 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001594 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001595 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001596 sizeof(struct fsctl_set_integrity_information_req),
Steve French24df1482016-09-29 04:20:23 -05001597 NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001598 &ret_data_len);
1599
1600}
1601
Steve Frenche02789a2018-08-09 14:33:12 -05001602/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1603#define GMT_TOKEN_SIZE 50
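/* e.g. "@GMT-2018.06.01-20.15.30" - 24 chars, 48 bytes as UTF-16, plus a 2 byte null */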
1604
1605/*
1606 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1607 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1608 */
Steve Frenchb3152e22015-06-24 03:17:02 -05001609static int
Steve French834170c2016-09-30 21:14:26 -05001610smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1611 struct cifsFileInfo *cfile, void __user *ioc_buf)
1612{
1613 char *retbuf = NULL;
1614 unsigned int ret_data_len = 0;
1615 int rc;
1616 struct smb_snapshot_array snapshot_in;
1617
1618 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1619 cfile->fid.volatile_fid,
1620 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001621 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001622 NULL, 0 /* no input data */,
Steve French834170c2016-09-30 21:14:26 -05001623 (char **)&retbuf,
1624 &ret_data_len);
1625 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
1626 rc, ret_data_len);
1627 if (rc)
1628 return rc;
1629
1630 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
1631 /* Fixup buffer */
1632 if (copy_from_user(&snapshot_in, ioc_buf,
1633 sizeof(struct smb_snapshot_array))) {
1634 rc = -EFAULT;
1635 kfree(retbuf);
1636 return rc;
1637 }
Steve French834170c2016-09-30 21:14:26 -05001638
Steve Frenche02789a2018-08-09 14:33:12 -05001639 /*
1640 * Check for min size, ie not large enough to fit even one GMT
1641 * token (snapshot). On the first ioctl some users may pass in
1642 * smaller size (or zero) to simply get the size of the array
1643 * so the user space caller can allocate sufficient memory
1644 * and retry the ioctl again with larger array size sufficient
1645 * to hold all of the snapshot GMT tokens on the second try.
1646 */
1647 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
1648 ret_data_len = sizeof(struct smb_snapshot_array);
1649
1650 /*
1651 * We return struct SRV_SNAPSHOT_ARRAY, followed by
1652 * the snapshot array (of 50 byte GMT tokens) each
1653 * representing an available previous version of the data
1654 */
1655 if (ret_data_len > (snapshot_in.snapshot_array_size +
1656 sizeof(struct smb_snapshot_array)))
1657 ret_data_len = snapshot_in.snapshot_array_size +
1658 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05001659
1660 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
1661 rc = -EFAULT;
1662 }
1663
1664 kfree(retbuf);
1665 return rc;
1666}
1667
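/*
 * Open the directory given by @path and issue the initial SMB2
 * QUERY_DIRECTORY. On failure the handle is closed; on success it is
 * left in @fid for smb2_query_dir_next()/smb2_close_dir().
 */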
1668static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001669smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1670 const char *path, struct cifs_sb_info *cifs_sb,
1671 struct cifs_fid *fid, __u16 search_flags,
1672 struct cifs_search_info *srch_inf)
1673{
1674 __le16 *utf16_path;
1675 int rc;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001676 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001677 struct cifs_open_parms oparms;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001678
1679 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1680 if (!utf16_path)
1681 return -ENOMEM;
1682
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001683 oparms.tcon = tcon;
1684 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
1685 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001686 if (backup_cred(cifs_sb))
1687 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1688 else
1689 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001690 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001691 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001692
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10001693 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001694 kfree(utf16_path);
1695 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001696 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001697 return rc;
1698 }
1699
1700 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02001701 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001702
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001703 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
1704 fid->volatile_fid, 0, srch_inf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001705 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001706 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001707 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001708 }
1709 return rc;
1710}
1711
1712static int
1713smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
1714 struct cifs_fid *fid, __u16 search_flags,
1715 struct cifs_search_info *srch_inf)
1716{
1717 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
1718 fid->volatile_fid, 0, srch_inf);
1719}
1720
1721static int
1722smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
1723 struct cifs_fid *fid)
1724{
1725 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1726}
1727
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001728/*
	1729 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
	1730 * the number of credits and return true. Otherwise - return false.
	1731 */
1732static bool
1733smb2_is_status_pending(char *buf, struct TCP_Server_Info *server, int length)
1734{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001735 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001736
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001737 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001738 return false;
1739
1740 if (!length) {
1741 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001742 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001743 spin_unlock(&server->req_lock);
1744 wake_up(&server->request_q);
1745 }
1746
1747 return true;
1748}
1749
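/*
 * Return true if the response status says the session has expired or been
 * deleted on the server.
 */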
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001750static bool
1751smb2_is_session_expired(char *buf)
1752{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001753 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001754
Mark Symsd81243c2018-05-24 09:47:31 +01001755 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
1756 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001757 return false;
1758
Steve Frenche68a9322018-07-30 14:23:58 -05001759 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
1760 le16_to_cpu(shdr->Command),
1761 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01001762 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05001763
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001764 return true;
1765}
1766
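/*
 * Acknowledge an oplock/lease break from the server: send a lease break
 * ack when leases are in use, otherwise an oplock break ack for the handle.
 */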
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001767static int
1768smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
1769 struct cifsInodeInfo *cinode)
1770{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07001771 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
1772 return SMB2_lease_break(0, tcon, cinode->lease_key,
1773 smb2_get_lease_state(cinode));
1774
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001775 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
1776 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001777 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001778}
1779
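/* Mark a request in a compound chain as related to the previous one. */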
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10001780void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001781smb2_set_related(struct smb_rqst *rqst)
1782{
1783 struct smb2_sync_hdr *shdr;
1784
1785 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
1786 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
1787}
1788
1789char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
1790
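/*
 * Pad the request to an 8 byte boundary - in place if the last iov has room,
 * otherwise by appending a padding iov - and set NextCommand to the total
 * length so the next request in the compound follows immediately after it.
 */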
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10001791void
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06001792smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst,
1793 bool has_space_for_padding)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001794{
1795 struct smb2_sync_hdr *shdr;
1796 unsigned long len = smb_rqst_len(server, rqst);
1797
1798 /* SMB headers in a compound are 8 byte aligned. */
1799 if (len & 7) {
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06001800 if (has_space_for_padding) {
1801 len = rqst->rq_iov[rqst->rq_nvec - 1].iov_len;
1802 rqst->rq_iov[rqst->rq_nvec - 1].iov_len =
1803 (len + 7) & ~7;
1804 } else {
1805 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
1806 rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
1807 rqst->rq_nvec++;
1808 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001809 len = smb_rqst_len(server, rqst);
1810 }
1811
1812 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
1813 shdr->NextCommand = cpu_to_le32(len);
1814}
1815
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001816/*
1817 * Passes the query info response back to the caller on success.
1818 * Caller need to free this with free_rsp_buf().
1819 */
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001820int
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001821smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
1822 __le16 *utf16_path, u32 desired_access,
1823 u32 class, u32 type, u32 output_len,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001824 struct kvec *rsp, int *buftype,
1825 struct cifs_sb_info *cifs_sb)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001826{
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001827 struct cifs_ses *ses = tcon->ses;
1828 struct TCP_Server_Info *server = ses->server;
1829 int flags = 0;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001830 struct smb_rqst rqst[3];
1831 int resp_buftype[3];
1832 struct kvec rsp_iov[3];
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10001833 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001834 struct kvec qi_iov[1];
1835 struct kvec close_iov[1];
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001836 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001837 struct cifs_open_parms oparms;
1838 struct cifs_fid fid;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001839 int rc;
1840
1841 if (smb3_encryption_required(tcon))
1842 flags |= CIFS_TRANSFORM_REQ;
1843
1844 memset(rqst, 0, sizeof(rqst));
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10001845 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001846 memset(rsp_iov, 0, sizeof(rsp_iov));
1847
1848 memset(&open_iov, 0, sizeof(open_iov));
1849 rqst[0].rq_iov = open_iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10001850 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001851
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001852 oparms.tcon = tcon;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001853 oparms.desired_access = desired_access;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001854 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001855 if (cifs_sb && backup_cred(cifs_sb))
1856 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1857 else
1858 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001859 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001860 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001861
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001862 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001863 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001864 goto qic_exit;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06001865 smb2_set_next_command(server, &rqst[0], 0);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001866
1867 memset(&qi_iov, 0, sizeof(qi_iov));
1868 rqst[1].rq_iov = qi_iov;
1869 rqst[1].rq_nvec = 1;
1870
1871 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001872 class, type, 0,
1873 output_len, 0,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001874 NULL);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001875 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001876 goto qic_exit;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06001877 smb2_set_next_command(server, &rqst[1], 0);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001878 smb2_set_related(&rqst[1]);
1879
1880 memset(&close_iov, 0, sizeof(close_iov));
1881 rqst[2].rq_iov = close_iov;
1882 rqst[2].rq_nvec = 1;
1883
1884 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
1885 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001886 goto qic_exit;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001887 smb2_set_related(&rqst[2]);
1888
1889 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1890 resp_buftype, rsp_iov);
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001891 if (rc) {
1892 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001893 goto qic_exit;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001894 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001895 *rsp = rsp_iov[1];
1896 *buftype = resp_buftype[1];
1897
1898 qic_exit:
1899 SMB2_open_free(&rqst[0]);
1900 SMB2_query_info_free(&rqst[1]);
1901 SMB2_close_free(&rqst[2]);
1902 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1903 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1904 return rc;
1905}
1906
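/*
 * Fill in @buf by querying FS_FULL_SIZE_INFORMATION on the root of the
 * share with a compounded open/query_info/close.
 */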
1907static int
1908smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
1909 struct kstatfs *buf)
1910{
1911 struct smb2_query_info_rsp *rsp;
1912 struct smb2_fs_full_size_info *info = NULL;
1913 __le16 utf16_path = 0; /* Null - open root of share */
1914 struct kvec rsp_iov = {NULL, 0};
1915 int buftype = CIFS_NO_BUFFER;
1916 int rc;
1917
1918
1919 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
1920 FILE_READ_ATTRIBUTES,
1921 FS_FULL_SIZE_INFORMATION,
1922 SMB2_O_INFO_FILESYSTEM,
1923 sizeof(struct smb2_fs_full_size_info),
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001924 &rsp_iov, &buftype, NULL);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001925 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001926 goto qfs_exit;
1927
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001928 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001929 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001930 info = (struct smb2_fs_full_size_info *)(
1931 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1932 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1933 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001934 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10001935 sizeof(struct smb2_fs_full_size_info));
1936 if (!rc)
1937 smb2_copy_fs_info_to_kstatfs(info, buf);
1938
1939qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06001940 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001941 return rc;
1942}
1943
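/*
 * If SMB3.1.1 POSIX extensions were negotiated, query filesystem info via
 * the POSIX variant; otherwise fall back to smb2_queryfs().
 */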
Steve French2d304212018-06-24 23:28:12 -05001944static int
1945smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
1946 struct kstatfs *buf)
1947{
1948 int rc;
1949 __le16 srch_path = 0; /* Null - open root of share */
1950 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1951 struct cifs_open_parms oparms;
1952 struct cifs_fid fid;
1953
1954 if (!tcon->posix_extensions)
1955 return smb2_queryfs(xid, tcon, buf);
1956
1957 oparms.tcon = tcon;
1958 oparms.desired_access = FILE_READ_ATTRIBUTES;
1959 oparms.disposition = FILE_OPEN;
1960 oparms.create_options = 0;
1961 oparms.fid = &fid;
1962 oparms.reconnect = false;
1963
1964 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
1965 if (rc)
1966 return rc;
1967
1968 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
1969 fid.volatile_fid, buf);
1970 buf->f_type = SMB2_MAGIC_NUMBER;
1971 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
1972 return rc;
1973}
Steve French2d304212018-06-24 23:28:12 -05001974
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07001975static bool
1976smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
1977{
1978 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
1979 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
1980}
1981
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001982static int
1983smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
1984 __u64 length, __u32 type, int lock, int unlock, bool wait)
1985{
1986 if (unlock && !lock)
1987 type = SMB2_LOCKFLAG_UNLOCK;
1988 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
1989 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
1990 current->tgid, length, offset, type, wait);
1991}
1992
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001993static void
1994smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
1995{
1996 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
1997}
1998
1999static void
2000smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2001{
2002 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2003}
2004
2005static void
2006smb2_new_lease_key(struct cifs_fid *fid)
2007{
Steve Frenchfa70b872016-09-22 00:39:34 -05002008 generate_random_uuid(fid->lease_key);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002009}
2010
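/*
 * Resolve a DFS referral for @search_name by sending
 * FSCTL_DFS_GET_REFERRALS on the IPC tcon (or any tcon of the session)
 * and parsing the returned referral list into @target_nodes.
 */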
Aurelien Aptel9d496402017-02-13 16:16:49 +01002011static int
2012smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2013 const char *search_name,
2014 struct dfs_info3_param **target_nodes,
2015 unsigned int *num_of_nodes,
2016 const struct nls_table *nls_codepage, int remap)
2017{
2018 int rc;
2019 __le16 *utf16_path = NULL;
2020 int utf16_path_len = 0;
2021 struct cifs_tcon *tcon;
2022 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2023 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2024 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2025
2026 cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name);
2027
2028 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002029 * Try to use the IPC tcon, otherwise just use any
Aurelien Aptel9d496402017-02-13 16:16:49 +01002030 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002031 tcon = ses->tcon_ipc;
2032 if (tcon == NULL) {
2033 spin_lock(&cifs_tcp_ses_lock);
2034 tcon = list_first_entry_or_null(&ses->tcon_list,
2035 struct cifs_tcon,
2036 tcon_list);
2037 if (tcon)
2038 tcon->tc_count++;
2039 spin_unlock(&cifs_tcp_ses_lock);
2040 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002041
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002042 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002043 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2044 ses);
2045 rc = -ENOTCONN;
2046 goto out;
2047 }
2048
2049 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2050 &utf16_path_len,
2051 nls_codepage, remap);
2052 if (!utf16_path) {
2053 rc = -ENOMEM;
2054 goto out;
2055 }
2056
2057 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2058 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2059 if (!dfs_req) {
2060 rc = -ENOMEM;
2061 goto out;
2062 }
2063
2064 /* Highest DFS referral version understood */
2065 dfs_req->MaxReferralLevel = DFS_VERSION;
2066
	2067	 /* Path to resolve, as a null-terminated UTF-16 string */
2068 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2069
2070 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002071 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2072 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002073 true /* is_fsctl */,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002074 (char *)dfs_req, dfs_req_size,
2075 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002076 } while (rc == -EAGAIN);
2077
2078 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002079 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Aurelien Aptel57025912017-11-21 14:47:56 +01002080 cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002081 goto out;
2082 }
2083
2084 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2085 num_of_nodes, target_nodes,
2086 nls_codepage, remap, search_name,
2087 true /* is_unicode */);
2088 if (rc) {
2089 cifs_dbg(VFS, "parse error in smb2_get_dfs_refer rc=%d\n", rc);
2090 goto out;
2091 }
2092
2093 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002094 if (tcon && !tcon->ipc) {
2095 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002096 spin_lock(&cifs_tcp_ses_lock);
2097 tcon->tc_count--;
2098 spin_unlock(&cifs_tcp_ses_lock);
2099 }
2100 kfree(utf16_path);
2101 kfree(dfs_req);
2102 kfree(dfs_rsp);
2103 return rc;
2104}
Pavel Shilovsky78932422016-07-24 10:37:38 +03002105#define SMB2_SYMLINK_STRUCT_SIZE \
2106 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2107
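/*
 * Read a symlink target: the open is expected to fail with a symlink error
 * response, from which the substitute name is extracted and converted into
 * @target_path.
 */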
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002108static int
2109smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2110 const char *full_path, char **target_path,
2111 struct cifs_sb_info *cifs_sb)
2112{
2113 int rc;
2114 __le16 *utf16_path;
2115 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2116 struct cifs_open_parms oparms;
2117 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002118 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002119 struct smb2_err_rsp *err_buf = NULL;
2120 int resp_buftype;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002121 struct smb2_symlink_err_rsp *symlink;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002122 unsigned int sub_len;
2123 unsigned int sub_offset;
2124 unsigned int print_len;
2125 unsigned int print_offset;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002126
2127 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2128
2129 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2130 if (!utf16_path)
2131 return -ENOMEM;
2132
2133 oparms.tcon = tcon;
2134 oparms.desired_access = FILE_READ_ATTRIBUTES;
2135 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05002136 if (backup_cred(cifs_sb))
2137 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2138 else
2139 oparms.create_options = 0;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002140 oparms.fid = &fid;
2141 oparms.reconnect = false;
2142
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002143 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
2144 &resp_buftype);
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002145 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002146 rc = -ENOENT;
Dan Carpenterff361fd2018-06-19 15:25:30 +03002147 goto free_path;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002148 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002149
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002150 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002151 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002152 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002153 rc = -ENOENT;
2154 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002155 }
2156
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002157 /* open must fail on symlink - reset rc */
2158 rc = 0;
2159 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2160 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2161 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002162 print_len = le16_to_cpu(symlink->PrintNameLength);
2163 print_offset = le16_to_cpu(symlink->PrintNameOffset);
2164
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002165 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002166 rc = -ENOENT;
2167 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002168 }
2169
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002170 if (err_iov.iov_len <
2171 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002172 rc = -ENOENT;
2173 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002174 }
2175
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002176 *target_path = cifs_strndup_from_utf16(
2177 (char *)symlink->PathBuffer + sub_offset,
2178 sub_len, true, cifs_sb->local_nls);
2179 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002180 rc = -ENOMEM;
2181 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002182 }
2183 convert_delimiter(*target_path, '/');
2184 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002185
2186 querty_exit:
2187 free_rsp_buf(resp_buftype, err_buf);
Dan Carpenterff361fd2018-06-19 15:25:30 +03002188 free_path:
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002189 kfree(utf16_path);
2190 return rc;
2191}
2192
Arnd Bergmann84908422017-06-27 17:06:13 +02002193#ifdef CONFIG_CIFS_ACL
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002194static struct cifs_ntsd *
2195get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2196 const struct cifs_fid *cifsfid, u32 *pacllen)
2197{
2198 struct cifs_ntsd *pntsd = NULL;
2199 unsigned int xid;
2200 int rc = -EOPNOTSUPP;
2201 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2202
2203 if (IS_ERR(tlink))
2204 return ERR_CAST(tlink);
2205
2206 xid = get_xid();
2207 cifs_dbg(FYI, "trying to get acl\n");
2208
2209 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2210 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2211 free_xid(xid);
2212
2213 cifs_put_tlink(tlink);
2214
2215 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2216 if (rc)
2217 return ERR_PTR(rc);
2218 return pntsd;
2219
2220}
2221
2222static struct cifs_ntsd *
2223get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2224 const char *path, u32 *pacllen)
2225{
2226 struct cifs_ntsd *pntsd = NULL;
2227 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2228 unsigned int xid;
2229 int rc;
2230 struct cifs_tcon *tcon;
2231 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2232 struct cifs_fid fid;
2233 struct cifs_open_parms oparms;
2234 __le16 *utf16_path;
2235
2236 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2237 if (IS_ERR(tlink))
2238 return ERR_CAST(tlink);
2239
2240 tcon = tlink_tcon(tlink);
2241 xid = get_xid();
2242
2243 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002244 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002245 else
2246 oparms.create_options = 0;
2247
2248 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002249 if (!utf16_path) {
2250 rc = -ENOMEM;
2251 free_xid(xid);
2252 return ERR_PTR(rc);
2253 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002254
2255 oparms.tcon = tcon;
2256 oparms.desired_access = READ_CONTROL;
2257 oparms.disposition = FILE_OPEN;
2258 oparms.fid = &fid;
2259 oparms.reconnect = false;
2260
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002261 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002262 kfree(utf16_path);
2263 if (!rc) {
2264 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2265 fid.volatile_fid, (void **)&pntsd, pacllen);
2266 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2267 }
2268
2269 cifs_put_tlink(tlink);
2270 free_xid(xid);
2271
2272 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2273 if (rc)
2274 return ERR_PTR(rc);
2275 return pntsd;
2276}
2277
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002278#ifdef CONFIG_CIFS_ACL
2279static int
2280set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2281 struct inode *inode, const char *path, int aclflag)
2282{
2283 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2284 unsigned int xid;
2285 int rc, access_flags = 0;
2286 struct cifs_tcon *tcon;
2287 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2288 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2289 struct cifs_fid fid;
2290 struct cifs_open_parms oparms;
2291 __le16 *utf16_path;
2292
2293 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2294 if (IS_ERR(tlink))
2295 return PTR_ERR(tlink);
2296
2297 tcon = tlink_tcon(tlink);
2298 xid = get_xid();
2299
2300 if (backup_cred(cifs_sb))
2301 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2302 else
2303 oparms.create_options = 0;
2304
2305 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2306 access_flags = WRITE_OWNER;
2307 else
2308 access_flags = WRITE_DAC;
2309
2310 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002311 if (!utf16_path) {
2312 rc = -ENOMEM;
2313 free_xid(xid);
2314 return rc;
2315 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002316
2317 oparms.tcon = tcon;
2318 oparms.desired_access = access_flags;
2319 oparms.disposition = FILE_OPEN;
2320 oparms.path = path;
2321 oparms.fid = &fid;
2322 oparms.reconnect = false;
2323
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002324 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002325 kfree(utf16_path);
2326 if (!rc) {
2327 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2328 fid.volatile_fid, pnntsd, acllen, aclflag);
2329 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2330 }
2331
2332 cifs_put_tlink(tlink);
2333 free_xid(xid);
2334 return rc;
2335}
2336#endif /* CIFS_ACL */
2337
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002338/* Retrieve an ACL from the server */
2339static struct cifs_ntsd *
2340get_smb2_acl(struct cifs_sb_info *cifs_sb,
2341 struct inode *inode, const char *path,
2342 u32 *pacllen)
2343{
2344 struct cifs_ntsd *pntsd = NULL;
2345 struct cifsFileInfo *open_file = NULL;
2346
2347 if (inode)
2348 open_file = find_readable_file(CIFS_I(inode), true);
2349 if (!open_file)
2350 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2351
2352 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2353 cifsFileInfo_put(open_file);
2354 return pntsd;
2355}
Arnd Bergmann84908422017-06-27 17:06:13 +02002356#endif
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002357
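/*
 * FALLOC_FL_ZERO_RANGE: zero the given range with FSCTL_SET_ZERO_DATA.
 * Only supported on sparse files, and (unless KEEP_SIZE) only when the
 * range does not extend past the current end of file.
 */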
Steve French30175622014-08-17 18:16:40 -05002358static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2359 loff_t offset, loff_t len, bool keep_size)
2360{
2361 struct inode *inode;
2362 struct cifsInodeInfo *cifsi;
2363 struct cifsFileInfo *cfile = file->private_data;
2364 struct file_zero_data_information fsctl_buf;
2365 long rc;
2366 unsigned int xid;
2367
2368 xid = get_xid();
2369
David Howells2b0143b2015-03-17 22:25:59 +00002370 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05002371 cifsi = CIFS_I(inode);
2372
2373 /* if file not oplocked can't be sure whether asking to extend size */
2374 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002375 if (keep_size == false) {
2376 rc = -EOPNOTSUPP;
2377 free_xid(xid);
2378 return rc;
2379 }
Steve French30175622014-08-17 18:16:40 -05002380
Steve French2bb93d22014-08-20 18:56:29 -05002381 /*
Steve French30175622014-08-17 18:16:40 -05002382 * Must check if file sparse since fallocate -z (zero range) assumes
2383 * non-sparse allocation
2384 */
Steve Frenchcfe89092018-05-19 02:04:55 -05002385 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
2386 rc = -EOPNOTSUPP;
2387 free_xid(xid);
2388 return rc;
2389 }
Steve French30175622014-08-17 18:16:40 -05002390
2391 /*
2392 * need to make sure we are not asked to extend the file since the SMB3
2393 * fsctl does not change the file size. In the future we could change
2394 * this to zero the first part of the range then set the file size
2395 * which for a non sparse file would zero the newly extended range
2396 */
2397 if (keep_size == false)
Steve Frenchcfe89092018-05-19 02:04:55 -05002398 if (i_size_read(inode) < offset + len) {
2399 rc = -EOPNOTSUPP;
2400 free_xid(xid);
2401 return rc;
2402 }
Steve French30175622014-08-17 18:16:40 -05002403
2404 cifs_dbg(FYI, "offset %lld len %lld", offset, len);
2405
2406 fsctl_buf.FileOffset = cpu_to_le64(offset);
2407 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2408
2409 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2410 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002411			true /* is_fsctl */, (char *)&fsctl_buf,
Steve French30175622014-08-17 18:16:40 -05002412 sizeof(struct file_zero_data_information), NULL, NULL);
2413 free_xid(xid);
2414 return rc;
2415}
2416
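/*
 * FALLOC_FL_PUNCH_HOLE: make the file sparse if it is not already, then
 * free the range with FSCTL_SET_ZERO_DATA.
 */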
Steve French31742c52014-08-17 08:38:47 -05002417static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2418 loff_t offset, loff_t len)
2419{
2420 struct inode *inode;
2421 struct cifsInodeInfo *cifsi;
2422 struct cifsFileInfo *cfile = file->private_data;
2423 struct file_zero_data_information fsctl_buf;
2424 long rc;
2425 unsigned int xid;
2426 __u8 set_sparse = 1;
2427
2428 xid = get_xid();
2429
David Howells2b0143b2015-03-17 22:25:59 +00002430 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002431 cifsi = CIFS_I(inode);
2432
2433 /* Need to make file sparse, if not already, before freeing range. */
2434 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002435 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2436 rc = -EOPNOTSUPP;
2437 free_xid(xid);
2438 return rc;
2439 }
Steve French31742c52014-08-17 08:38:47 -05002440
2441 cifs_dbg(FYI, "offset %lld len %lld", offset, len);
2442
2443 fsctl_buf.FileOffset = cpu_to_le64(offset);
2444 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2445
2446 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2447 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002448			true /* is_fsctl */, (char *)&fsctl_buf,
Steve French31742c52014-08-17 08:38:47 -05002449 sizeof(struct file_zero_data_information), NULL, NULL);
2450 free_xid(xid);
2451 return rc;
2452}
2453
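/*
 * Plain fallocate (mode 0 or KEEP_SIZE): a no-op for files that are already
 * non-sparse and not being extended; a sparse file is switched back to
 * non-sparse when the request covers essentially the whole file, and
 * anything else returns -EOPNOTSUPP.
 */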
Steve French9ccf3212014-10-18 17:01:15 -05002454static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
2455 loff_t off, loff_t len, bool keep_size)
2456{
2457 struct inode *inode;
2458 struct cifsInodeInfo *cifsi;
2459 struct cifsFileInfo *cfile = file->private_data;
2460 long rc = -EOPNOTSUPP;
2461 unsigned int xid;
2462
2463 xid = get_xid();
2464
David Howells2b0143b2015-03-17 22:25:59 +00002465 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05002466 cifsi = CIFS_I(inode);
2467
2468 /* if file not oplocked can't be sure whether asking to extend size */
2469 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002470 if (keep_size == false) {
2471 free_xid(xid);
2472 return rc;
2473 }
Steve French9ccf3212014-10-18 17:01:15 -05002474
2475 /*
2476 * Files are non-sparse by default so falloc may be a no-op
2477 * Must check if file sparse. If not sparse, and not extending
2478 * then no need to do anything since file already allocated
2479 */
2480 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
2481 if (keep_size == true)
Steve Frenchcfe89092018-05-19 02:04:55 -05002482 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002483 /* check if extending file */
2484 else if (i_size_read(inode) >= off + len)
2485 /* not extending file and already not sparse */
Steve Frenchcfe89092018-05-19 02:04:55 -05002486 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002487 /* BB: in future add else clause to extend file */
2488 else
Steve Frenchcfe89092018-05-19 02:04:55 -05002489 rc = -EOPNOTSUPP;
2490 free_xid(xid);
2491 return rc;
Steve French9ccf3212014-10-18 17:01:15 -05002492 }
2493
2494 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
2495 /*
2496 * Check if falloc starts within first few pages of file
2497 * and ends within a few pages of the end of file to
2498 * ensure that most of file is being forced to be
2499 * fallocated now. If so then setting whole file sparse
2500 * ie potentially making a few extra pages at the beginning
2501 * or end of the file non-sparse via set_sparse is harmless.
2502 */
Steve Frenchcfe89092018-05-19 02:04:55 -05002503 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
2504 rc = -EOPNOTSUPP;
2505 free_xid(xid);
2506 return rc;
2507 }
Steve French9ccf3212014-10-18 17:01:15 -05002508
2509 rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
2510 }
2511 /* BB: else ... in future add code to extend file and set sparse */
2512
2513
2514 free_xid(xid);
2515 return rc;
2516}
2517
2518
Steve French31742c52014-08-17 08:38:47 -05002519static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
2520 loff_t off, loff_t len)
2521{
2522 /* KEEP_SIZE already checked for by do_fallocate */
2523 if (mode & FALLOC_FL_PUNCH_HOLE)
2524 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05002525 else if (mode & FALLOC_FL_ZERO_RANGE) {
2526 if (mode & FALLOC_FL_KEEP_SIZE)
2527 return smb3_zero_range(file, tcon, off, len, true);
2528 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05002529 } else if (mode == FALLOC_FL_KEEP_SIZE)
2530 return smb3_simple_falloc(file, tcon, off, len, true);
2531 else if (mode == 0)
2532 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05002533
2534 return -EOPNOTSUPP;
2535}
2536
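/* Downgrade an oplock to level II (read caching) or drop it entirely. */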
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002537static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002538smb2_downgrade_oplock(struct TCP_Server_Info *server,
2539 struct cifsInodeInfo *cinode, bool set_level2)
2540{
2541 if (set_level2)
2542 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
2543 0, NULL);
2544 else
2545 server->ops->set_oplock_level(cinode, 0, 0, NULL);
2546}
2547
2548static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002549smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2550 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002551{
2552 oplock &= 0xFF;
2553 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
2554 return;
2555 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002556 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002557 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
2558 &cinode->vfs_inode);
2559 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002560 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002561 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
2562 &cinode->vfs_inode);
2563 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
2564 cinode->oplock = CIFS_CACHE_READ_FLG;
2565 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
2566 &cinode->vfs_inode);
2567 } else
2568 cinode->oplock = 0;
2569}
2570
2571static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002572smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2573 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002574{
2575 char message[5] = {0};
2576
2577 oplock &= 0xFF;
2578 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
2579 return;
2580
2581 cinode->oplock = 0;
2582 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
2583 cinode->oplock |= CIFS_CACHE_READ_FLG;
2584 strcat(message, "R");
2585 }
2586 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
2587 cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
2588 strcat(message, "H");
2589 }
2590 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
2591 cinode->oplock |= CIFS_CACHE_WRITE_FLG;
2592 strcat(message, "W");
2593 }
2594 if (!cinode->oplock)
2595 strcat(message, "None");
2596 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
2597 &cinode->vfs_inode);
2598}
2599
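/*
 * SMB3 leases carry an epoch: compare the old and new caching state and
 * the epoch delta to decide whether the caller must purge the page cache.
 */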
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002600static void
2601smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2602 unsigned int epoch, bool *purge_cache)
2603{
2604 unsigned int old_oplock = cinode->oplock;
2605
2606 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
2607
2608 if (purge_cache) {
2609 *purge_cache = false;
2610 if (old_oplock == CIFS_CACHE_READ_FLG) {
2611 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
2612 (epoch - cinode->epoch > 0))
2613 *purge_cache = true;
2614 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
2615 (epoch - cinode->epoch > 1))
2616 *purge_cache = true;
2617 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
2618 (epoch - cinode->epoch > 1))
2619 *purge_cache = true;
2620 else if (cinode->oplock == 0 &&
2621 (epoch - cinode->epoch > 0))
2622 *purge_cache = true;
2623 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
2624 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
2625 (epoch - cinode->epoch > 0))
2626 *purge_cache = true;
2627 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
2628 (epoch - cinode->epoch > 1))
2629 *purge_cache = true;
2630 }
2631 cinode->epoch = epoch;
2632 }
2633}
2634
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002635static bool
2636smb2_is_read_op(__u32 oplock)
2637{
2638 return oplock == SMB2_OPLOCK_LEVEL_II;
2639}
2640
2641static bool
2642smb21_is_read_op(__u32 oplock)
2643{
2644 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
2645 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
2646}
2647
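/* Map a requested SMB2 oplock level to the equivalent lease state bits. */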
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002648static __le32
2649map_oplock_to_lease(u8 oplock)
2650{
2651 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
2652 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
2653 else if (oplock == SMB2_OPLOCK_LEVEL_II)
2654 return SMB2_LEASE_READ_CACHING;
2655 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
2656 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
2657 SMB2_LEASE_WRITE_CACHING;
2658 return 0;
2659}
2660
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002661static char *
2662smb2_create_lease_buf(u8 *lease_key, u8 oplock)
2663{
2664 struct create_lease *buf;
2665
2666 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
2667 if (!buf)
2668 return NULL;
2669
Stefano Brivio729c0c92018-07-05 15:10:02 +02002670 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002671 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002672
2673 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2674 (struct create_lease, lcontext));
2675 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
2676 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2677 (struct create_lease, Name));
2678 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07002679 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04002680 buf->Name[0] = 'R';
2681 buf->Name[1] = 'q';
2682 buf->Name[2] = 'L';
2683 buf->Name[3] = 's';
2684 return (char *)buf;
2685}
2686
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002687static char *
2688smb3_create_lease_buf(u8 *lease_key, u8 oplock)
2689{
2690 struct create_lease_v2 *buf;
2691
2692 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
2693 if (!buf)
2694 return NULL;
2695
Stefano Brivio729c0c92018-07-05 15:10:02 +02002696 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002697 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
2698
2699 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2700 (struct create_lease_v2, lcontext));
2701 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
2702 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2703 (struct create_lease_v2, Name));
2704 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07002705 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002706 buf->Name[0] = 'R';
2707 buf->Name[1] = 'q';
2708 buf->Name[2] = 'L';
2709 buf->Name[3] = 's';
2710 return (char *)buf;
2711}
2712
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002713static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06002714smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002715{
2716 struct create_lease *lc = (struct create_lease *)buf;
2717
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002718 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04002719 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
2720 return SMB2_OPLOCK_LEVEL_NOCHANGE;
2721 return le32_to_cpu(lc->lcontext.LeaseState);
2722}
2723
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002724static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06002725smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002726{
2727 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
2728
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002729 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002730 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
2731 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06002732 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02002733 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04002734 return le32_to_cpu(lc->lcontext.LeaseState);
2735}
2736
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04002737static unsigned int
2738smb2_wp_retry_size(struct inode *inode)
2739{
2740 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
2741 SMB2_MAX_BUFFER_SIZE);
2742}
2743
Pavel Shilovsky52755802014-08-18 20:49:57 +04002744static bool
2745smb2_dir_needs_close(struct cifsFileInfo *cfile)
2746{
2747 return !cfile->invalidHandle;
2748}
2749
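/*
 * Build the SMB3 transform header that precedes an encrypted message:
 * original message size, a fresh nonce and the session id copied from the
 * plaintext header.
 */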
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002750static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10002751fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
2752 struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002753{
2754 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10002755 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002756
2757 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
2758 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
2759 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
2760 tr_hdr->Flags = cpu_to_le16(0x01);
2761 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
2762 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002763}
2764
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11002765/* We cannot use the normal sg_set_buf() as we will sometimes pass a
2766 * stack object as buf.
2767 */
2768static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
2769 unsigned int buflen)
2770{
2771 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
2772}
2773
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002774/* Assumes the first rqst has a transform header as the first iov.
2775 * I.e.
2776 * rqst[0].rq_iov[0] is transform header
2777 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
2778 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10002779 */
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002780static struct scatterlist *
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002781init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002782{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002783 unsigned int sg_len;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002784 struct scatterlist *sg;
2785 unsigned int i;
2786 unsigned int j;
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002787 unsigned int idx = 0;
2788 int skip;
2789
2790 sg_len = 1;
2791 for (i = 0; i < num_rqst; i++)
2792 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002793
2794 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
2795 if (!sg)
2796 return NULL;
2797
2798 sg_init_table(sg, sg_len);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002799 for (i = 0; i < num_rqst; i++) {
2800 for (j = 0; j < rqst[i].rq_nvec; j++) {
2801 /*
2802 * The first rqst has a transform header where the
2803 * first 20 bytes are not part of the encrypted blob
2804 */
2805 skip = (i == 0) && (j == 0) ? 20 : 0;
2806 smb2_sg_set_buf(&sg[idx++],
2807 rqst[i].rq_iov[j].iov_base + skip,
2808 rqst[i].rq_iov[j].iov_len - skip);
2809 }
Steve Frenchd5f07fb2018-06-05 17:46:24 -05002810
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002811 for (j = 0; j < rqst[i].rq_npages; j++) {
2812 unsigned int len, offset;
2813
2814 rqst_page_get_length(&rqst[i], j, &len, &offset);
2815 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
2816 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002817 }
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002818 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002819 return sg;
2820}
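/*
 * Worked example, not part of the original file: the scatterlist sizing that
 * init_sg() above performs. The helper and the sample numbers are assumptions
 * for illustration.
 */
static unsigned int sg_entries_needed_sketch(int num_rqst,
					     const unsigned int *nvec,
					     const unsigned int *npages)
{
	unsigned int n = 1;	/* one extra entry for the 16-byte signature */
	int i;

	for (i = 0; i < num_rqst; i++)
		n += nvec[i] + npages[i];
	return n;
}
/*
 * E.g. a compound where rqst[0] has 2 iovs (transform header + SMB2 request)
 * and 2 data pages, and rqst[1] has 1 iov and no pages, needs
 * 1 + (2 + 2) + (1 + 0) = 6 entries; entry 0 maps the transform header minus
 * its first 20 bytes (ProtocolId + Signature).
 */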
2821
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08002822static int
2823smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
2824{
2825 struct cifs_ses *ses;
2826 u8 *ses_enc_key;
2827
2828 spin_lock(&cifs_tcp_ses_lock);
2829 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
2830 if (ses->Suid != ses_id)
2831 continue;
2832 ses_enc_key = enc ? ses->smb3encryptionkey :
2833 ses->smb3decryptionkey;
2834 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
2835 spin_unlock(&cifs_tcp_ses_lock);
2836 return 0;
2837 }
2838 spin_unlock(&cifs_tcp_ses_lock);
2839
2840 return 1;
2841}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002842/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10002843 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
2844 * iov[0] - transform header (associated data),
2845 * iov[1-N] - SMB2 header and pages - data to encrypt.
2846 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002847 * untouched.
2848 */
2849static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002850crypt_message(struct TCP_Server_Info *server, int num_rqst,
2851 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002852{
2853 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002854 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002855 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002856 int rc = 0;
2857 struct scatterlist *sg;
2858 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08002859 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002860 struct aead_request *req;
2861 char *iv;
2862 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01002863 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002864 struct crypto_aead *tfm;
2865 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
2866
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08002867 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
2868 if (rc) {
2869 cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
2870 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002871 return 0;
2872 }
2873
2874 rc = smb3_crypto_aead_allocate(server);
2875 if (rc) {
2876 cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
2877 return rc;
2878 }
2879
2880 tfm = enc ? server->secmech.ccmaesencrypt :
2881 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08002882 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002883 if (rc) {
2884 cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
2885 return rc;
2886 }
2887
2888 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
2889 if (rc) {
2890 cifs_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
2891 return rc;
2892 }
2893
2894 req = aead_request_alloc(tfm, GFP_KERNEL);
2895 if (!req) {
2896 cifs_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
2897 return -ENOMEM;
2898 }
2899
2900 if (!enc) {
2901 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
2902 crypt_len += SMB2_SIGNATURE_SIZE;
2903 }
2904
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002905 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002906 if (!sg) {
Christophe Jaillet517a6e42017-06-11 09:12:47 +02002907 cifs_dbg(VFS, "%s: Failed to init sg\n", __func__);
2908 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002909 goto free_req;
2910 }
2911
2912 iv_len = crypto_aead_ivsize(tfm);
2913 iv = kzalloc(iv_len, GFP_KERNEL);
2914 if (!iv) {
2915 cifs_dbg(VFS, "%s: Failed to alloc IV\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02002916 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002917 goto free_sg;
2918 }
2919 iv[0] = 3;
2920 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
2921
2922 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
2923 aead_request_set_ad(req, assoc_data_len);
2924
2925 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01002926 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002927
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01002928 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
2929 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002930
2931 if (!rc && enc)
2932 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
2933
2934 kfree(iv);
2935free_sg:
2936 kfree(sg);
2937free_req:
2938 kfree(req);
2939 return rc;
2940}
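/*
 * Why iv[0] == 3 in crypt_message() above: the kernel's "ccm(aes)" transform
 * expects an RFC 3610 style IV whose first byte encodes L' = L - 1, where L
 * is the size of the length field and 15 = nonce_len + L. A sketch of that
 * arithmetic (assumption for illustration: the 11-byte SMB3 CCM nonce):
 */
static inline u8 ccm_iv_flag_sketch(unsigned int nonce_len)
{
	unsigned int L = 15 - nonce_len;	/* bytes left for the length field */

	return (u8)(L - 1);			/* 15 - 11 = 4, so iv[0] = 3 */
}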
2941
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002942void
2943smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002944{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002945 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002946
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002947 for (i = 0; i < num_rqst; i++) {
2948 if (rqst[i].rq_pages) {
2949 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
2950 put_page(rqst[i].rq_pages[j]);
2951 kfree(rqst[i].rq_pages);
2952 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002953 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002954}
2955
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002956/*
2957 * This function will initialize new_rq and encrypt the content.
2958 * The first entry, new_rq[0], only contains a single iov which contains
2959 * a smb2_transform_hdr and is pre-allocated by the caller.
2960 * This function then populates new_rq[1+] with the content from old_rq[0+].
2961 *
2962 * The end result is an array of smb_rqst structures where the first structure
2963 * only contains a single iov for the transform header which we then can pass
2964 * to crypt_message().
2965 *
2966 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
2967 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
2968 */
2969static int
2970smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
2971 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002972{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002973 struct page **pages;
2974 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
2975 unsigned int npages;
2976 unsigned int orig_len = 0;
2977 int i, j;
2978 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07002979
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10002980 for (i = 1; i < num_rqst; i++) {
2981 npages = old_rq[i - 1].rq_npages;
2982 pages = kmalloc_array(npages, sizeof(struct page *),
2983 GFP_KERNEL);
2984 if (!pages)
2985 goto err_free;
2986
2987 new_rq[i].rq_pages = pages;
2988 new_rq[i].rq_npages = npages;
2989 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
2990 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
2991 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
2992 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
2993 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
2994
2995 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
2996
2997 for (j = 0; j < npages; j++) {
2998 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2999 if (!pages[j])
3000 goto err_free;
3001 }
3002
3003 /* copy pages from the old request */
3004 for (j = 0; j < npages; j++) {
3005 char *dst, *src;
3006 unsigned int offset, len;
3007
3008 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3009
3010 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3011 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3012
3013 memcpy(dst, src, len);
3014 kunmap(new_rq[i].rq_pages[j]);
3015 kunmap(old_rq[i - 1].rq_pages[j]);
3016 }
3017 }
3018
3019 /* fill the 1st iov with a transform header */
3020 fill_transform_hdr(tr_hdr, orig_len, old_rq);
3021
3022 rc = crypt_message(server, num_rqst, new_rq, 1);
3023 cifs_dbg(FYI, "encrypt message returned %d\n", rc);
3024 if (rc)
3025 goto err_free;
3026
3027 return rc;
3028
3029err_free:
3030 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3031 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003032}
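/*
 * Simplified caller sketch, not part of the original file: how a sender might
 * wrap a compound in a transform header before handing it to
 * smb3_init_transform_rq() above. The function name and error handling are
 * assumptions for illustration; the real call site lives in the transport
 * layer and differs in detail. Assumes num_rqst + 1 <= MAX_COMPOUND.
 */
static int encrypt_compound_sketch(struct TCP_Server_Info *server,
				   int num_rqst, struct smb_rqst *old_rq)
{
	struct smb_rqst new_rq[MAX_COMPOUND] = {};
	struct smb2_transform_hdr *tr_hdr;
	struct kvec iov = {};
	int rc;

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	new_rq[0].rq_iov = &iov;
	new_rq[0].rq_nvec = 1;

	/* copies old_rq into new_rq[1..num_rqst], then encrypts in place */
	rc = smb3_init_transform_rq(server, num_rqst + 1, new_rq, old_rq);
	if (!rc) {
		/* ... transmit new_rq[0..num_rqst] here ... */
		smb3_free_compound_rqst(num_rqst, &new_rq[1]);
	}
	kfree(tr_hdr);
	return rc;
}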
3033
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003034static int
3035smb3_is_transform_hdr(void *buf)
3036{
3037 struct smb2_transform_hdr *trhdr = buf;
3038
3039 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3040}
3041
3042static int
3043decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3044 unsigned int buf_data_size, struct page **pages,
3045 unsigned int npages, unsigned int page_data_size)
3046{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003047 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003048 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003049 int rc;
3050
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003051 iov[0].iov_base = buf;
3052 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3053 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3054 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003055
3056 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003057 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003058 rqst.rq_pages = pages;
3059 rqst.rq_npages = npages;
3060 rqst.rq_pagesz = PAGE_SIZE;
3061 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3062
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003063 rc = crypt_message(server, 1, &rqst, 0);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003064 cifs_dbg(FYI, "decrypt message returned %d\n", rc);
3065
3066 if (rc)
3067 return rc;
3068
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003069 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003070
3071 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003072
3073 return rc;
3074}
3075
3076static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003077read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3078 unsigned int npages, unsigned int len)
3079{
3080 int i;
3081 int length;
3082
3083 for (i = 0; i < npages; i++) {
3084 struct page *page = pages[i];
3085 size_t n;
3086
3087 n = len;
3088 if (len >= PAGE_SIZE) {
3089 /* enough data to fill the page */
3090 n = PAGE_SIZE;
3091 len -= n;
3092 } else {
3093 zero_user(page, len, PAGE_SIZE - len);
3094 len = 0;
3095 }
Long Li1dbe3462018-05-30 12:47:55 -07003096 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003097 if (length < 0)
3098 return length;
3099 server->total_read += length;
3100 }
3101
3102 return 0;
3103}
3104
3105static int
3106init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3107 unsigned int cur_off, struct bio_vec **page_vec)
3108{
3109 struct bio_vec *bvec;
3110 int i;
3111
3112 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3113 if (!bvec)
3114 return -ENOMEM;
3115
3116 for (i = 0; i < npages; i++) {
3117 bvec[i].bv_page = pages[i];
3118 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3119 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3120 data_size -= bvec[i].bv_len;
3121 }
3122
3123 if (data_size != 0) {
3124 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3125 kfree(bvec);
3126 return -EIO;
3127 }
3128
3129 *page_vec = bvec;
3130 return 0;
3131}
3132
3133static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003134handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3135 char *buf, unsigned int buf_len, struct page **pages,
3136 unsigned int npages, unsigned int page_data_size)
3137{
3138 unsigned int data_offset;
3139 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003140 unsigned int cur_off;
3141 unsigned int cur_page_idx;
3142 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003143 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10003144 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003145 struct bio_vec *bvec = NULL;
3146 struct iov_iter iter;
3147 struct kvec iov;
3148 int length;
Long Li74dcf412017-11-22 17:38:46 -07003149 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003150
3151 if (shdr->Command != SMB2_READ) {
3152 cifs_dbg(VFS, "only big read responses are supported\n");
3153 return -ENOTSUPP;
3154 }
3155
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003156 if (server->ops->is_session_expired &&
3157 server->ops->is_session_expired(buf)) {
3158 cifs_reconnect(server);
3159 wake_up(&server->response_q);
3160 return -1;
3161 }
3162
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003163 if (server->ops->is_status_pending &&
3164 server->ops->is_status_pending(buf, server, 0))
3165 return -1;
3166
3167 rdata->result = server->ops->map_error(buf, false);
3168 if (rdata->result != 0) {
3169 cifs_dbg(FYI, "%s: server returned error %d\n",
3170 __func__, rdata->result);
3171 dequeue_mid(mid, rdata->result);
3172 return 0;
3173 }
3174
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003175 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07003176#ifdef CONFIG_CIFS_SMB_DIRECT
3177 use_rdma_mr = rdata->mr;
3178#endif
3179 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003180
3181 if (data_offset < server->vals->read_rsp_size) {
3182 /*
3183 * win2k8 sometimes sends an offset of 0 when the read
3184 * is beyond the EOF. Treat it as if the data starts just after
3185 * the header.
3186 */
3187 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
3188 __func__, data_offset);
3189 data_offset = server->vals->read_rsp_size;
3190 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
3191 /* data_offset is beyond the end of smallbuf */
3192 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
3193 __func__, data_offset);
3194 rdata->result = -EIO;
3195 dequeue_mid(mid, rdata->result);
3196 return 0;
3197 }
3198
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003199 pad_len = data_offset - server->vals->read_rsp_size;
3200
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003201 if (buf_len <= data_offset) {
3202 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003203 cur_page_idx = pad_len / PAGE_SIZE;
3204 cur_off = pad_len % PAGE_SIZE;
3205
3206 if (cur_page_idx != 0) {
3207 /* data offset is beyond the 1st page of response */
3208 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
3209 __func__, data_offset);
3210 rdata->result = -EIO;
3211 dequeue_mid(mid, rdata->result);
3212 return 0;
3213 }
3214
3215 if (data_len > page_data_size - pad_len) {
3216 /* data_len is corrupt -- discard frame */
3217 rdata->result = -EIO;
3218 dequeue_mid(mid, rdata->result);
3219 return 0;
3220 }
3221
3222 rdata->result = init_read_bvec(pages, npages, page_data_size,
3223 cur_off, &bvec);
3224 if (rdata->result != 0) {
3225 dequeue_mid(mid, rdata->result);
3226 return 0;
3227 }
3228
David Howellsaa563d72018-10-20 00:57:56 +01003229 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003230 } else if (buf_len >= data_offset + data_len) {
3231 /* read response payload is in buf */
3232 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
3233 iov.iov_base = buf + data_offset;
3234 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01003235 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003236 } else {
3237 /* read response payload cannot be in both buf and pages */
3238 WARN_ONCE(1, "buf can not contain only a part of read data");
3239 rdata->result = -EIO;
3240 dequeue_mid(mid, rdata->result);
3241 return 0;
3242 }
3243
3244 /* set up first iov for signature check */
3245 rdata->iov[0].iov_base = buf;
3246 rdata->iov[0].iov_len = 4;
3247 rdata->iov[1].iov_base = buf + 4;
3248 rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
3249 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3250 rdata->iov[0].iov_base, server->vals->read_rsp_size);
3251
3252 length = rdata->copy_into_pages(server, rdata, &iter);
3253
3254 kfree(bvec);
3255
3256 if (length < 0)
3257 return length;
3258
3259 dequeue_mid(mid, false);
3260 return length;
3261}
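/*
 * Worked example, not part of the original file, for the padding math in
 * handle_read_data() above. Numbers are assumptions for illustration: the
 * SMB3 read response header is 80 bytes and the server 8-byte aligns the
 * payload at DataOffset 88.
 */
static void read_pad_math_sketch(void)
{
	unsigned int read_rsp_size = 80;	/* server->vals->read_rsp_size */
	unsigned int data_offset = 88;		/* DataOffset from the response */
	unsigned int pad_len = data_offset - read_rsp_size;	/* 8 */
	unsigned int cur_page_idx = pad_len / PAGE_SIZE;	/* 0 */
	unsigned int cur_off = pad_len % PAGE_SIZE;		/* 8 */

	/* the first 8 bytes of page 0 are padding; payload starts at cur_off */
	(void)cur_page_idx;
	(void)cur_off;
}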
3262
3263static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003264receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
3265{
3266 char *buf = server->smallbuf;
3267 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3268 unsigned int npages;
3269 struct page **pages;
3270 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003271 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003272 int rc;
3273 int i = 0;
3274
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003275 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003276 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
3277
3278 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
3279 if (rc < 0)
3280 return rc;
3281 server->total_read += rc;
3282
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003283 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11003284 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003285 npages = DIV_ROUND_UP(len, PAGE_SIZE);
3286
3287 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
3288 if (!pages) {
3289 rc = -ENOMEM;
3290 goto discard_data;
3291 }
3292
3293 for (; i < npages; i++) {
3294 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3295 if (!pages[i]) {
3296 rc = -ENOMEM;
3297 goto discard_data;
3298 }
3299 }
3300
3301 /* read the read response data into pages */
3302 rc = read_data_into_pages(server, pages, npages, len);
3303 if (rc)
3304 goto free_pages;
3305
Pavel Shilovsky350be252017-04-10 10:31:33 -07003306 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003307 if (rc)
3308 goto free_pages;
3309
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003310 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003311 pages, npages, len);
3312 if (rc)
3313 goto free_pages;
3314
3315 *mid = smb2_find_mid(server, buf);
3316 if (*mid == NULL)
3317 cifs_dbg(FYI, "mid not found\n");
3318 else {
3319 cifs_dbg(FYI, "mid found\n");
3320 (*mid)->decrypted = true;
3321 rc = handle_read_data(server, *mid, buf,
3322 server->vals->read_rsp_size,
3323 pages, npages, len);
3324 }
3325
3326free_pages:
3327 for (i = i - 1; i >= 0; i--)
3328 put_page(pages[i]);
3329 kfree(pages);
3330 return rc;
3331discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07003332 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003333 goto free_pages;
3334}
3335
3336static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003337receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003338 struct mid_q_entry **mids, char **bufs,
3339 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003340{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003341 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003342 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003343 char *tmpbuf;
3344 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003345 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003346 unsigned int buf_size;
3347 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003348 int next_is_large;
3349 char *next_buffer = NULL;
3350
3351 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003352
3353 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003354 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003355 server->large_buf = true;
3356 memcpy(server->bigbuf, buf, server->total_read);
3357 buf = server->bigbuf;
3358 }
3359
3360 /* now read the rest */
3361 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003362 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003363 if (length < 0)
3364 return length;
3365 server->total_read += length;
3366
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003367 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003368 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
3369 if (length)
3370 return length;
3371
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003372 next_is_large = server->large_buf;
3373 one_more:
3374 shdr = (struct smb2_sync_hdr *)buf;
3375 if (shdr->NextCommand) {
3376 if (next_is_large) {
3377 tmpbuf = server->bigbuf;
3378 next_buffer = (char *)cifs_buf_get();
3379 } else {
3380 tmpbuf = server->smallbuf;
3381 next_buffer = (char *)cifs_small_buf_get();
3382 }
3383 memcpy(next_buffer,
3384 tmpbuf + le32_to_cpu(shdr->NextCommand),
3385 pdu_length - le32_to_cpu(shdr->NextCommand));
3386 }
3387
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003388 mid_entry = smb2_find_mid(server, buf);
3389 if (mid_entry == NULL)
3390 cifs_dbg(FYI, "mid not found\n");
3391 else {
3392 cifs_dbg(FYI, "mid found\n");
3393 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003394 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003395 }
3396
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003397 if (*num_mids >= MAX_COMPOUND) {
3398 cifs_dbg(VFS, "too many PDUs in compound\n");
3399 return -1;
3400 }
3401 bufs[*num_mids] = buf;
3402 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003403
3404 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003405 ret = mid_entry->handle(server, mid_entry);
3406 else
3407 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003408
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003409 if (ret == 0 && shdr->NextCommand) {
3410 pdu_length -= le32_to_cpu(shdr->NextCommand);
3411 server->large_buf = next_is_large;
3412 if (next_is_large)
3413 server->bigbuf = next_buffer;
3414 else
3415 server->smallbuf = next_buffer;
3416
3417 buf += le32_to_cpu(shdr->NextCommand);
3418 goto one_more;
3419 }
3420
3421 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003422}
3423
3424static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003425smb3_receive_transform(struct TCP_Server_Info *server,
3426 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003427{
3428 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003429 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003430 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3431 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3432
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003433 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003434 sizeof(struct smb2_sync_hdr)) {
3435 cifs_dbg(VFS, "Transform message is too small (%u)\n",
3436 pdu_length);
3437 cifs_reconnect(server);
3438 wake_up(&server->response_q);
3439 return -ECONNABORTED;
3440 }
3441
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003442 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003443 cifs_dbg(VFS, "Transform message is broken\n");
3444 cifs_reconnect(server);
3445 wake_up(&server->response_q);
3446 return -ECONNABORTED;
3447 }
3448
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003449 /* TODO: add support for compounds containing READ. */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003450 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003451 return receive_encrypted_read(server, &mids[0]);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003452
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003453 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003454}
3455
3456int
3457smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
3458{
3459 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
3460
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003461 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003462 NULL, 0, 0);
3463}
3464
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003465static int
3466smb2_next_header(char *buf)
3467{
3468 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
3469 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
3470
3471 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
3472 return sizeof(struct smb2_transform_hdr) +
3473 le32_to_cpu(t_hdr->OriginalMessageSize);
3474
3475 return le32_to_cpu(hdr->NextCommand);
3476}
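/*
 * Illustrative sketch, not part of the original file: walking a compounded
 * reply with the offsets smb2_next_header() above returns. The helper is an
 * assumption for illustration and expects an already decrypted, fully read
 * buffer of total_len bytes.
 */
static int walk_compound_sketch(char *buf, unsigned int total_len,
				void (*handle_one)(char *pdu))
{
	unsigned int off = 0;

	while (off < total_len) {
		unsigned int next = smb2_next_header(buf + off);

		handle_one(buf + off);
		if (next == 0)			/* last PDU in the chain */
			break;
		if (next < sizeof(struct smb2_sync_hdr) ||
		    off + next > total_len)
			return -EINVAL;		/* malformed NextCommand */
		off += next;
	}
	return 0;
}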
3477
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003478struct smb_version_operations smb20_operations = {
3479 .compare_fids = smb2_compare_fids,
3480 .setup_request = smb2_setup_request,
3481 .setup_async_request = smb2_setup_async_request,
3482 .check_receive = smb2_check_receive,
3483 .add_credits = smb2_add_credits,
3484 .set_credits = smb2_set_credits,
3485 .get_credits_field = smb2_get_credits_field,
3486 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003487 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003488 .get_next_mid = smb2_get_next_mid,
3489 .read_data_offset = smb2_read_data_offset,
3490 .read_data_length = smb2_read_data_length,
3491 .map_error = map_smb2_to_linux_error,
3492 .find_mid = smb2_find_mid,
3493 .check_message = smb2_check_message,
3494 .dump_detail = smb2_dump_detail,
3495 .clear_stats = smb2_clear_stats,
3496 .print_stats = smb2_print_stats,
3497 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003498 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003499 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003500 .need_neg = smb2_need_neg,
3501 .negotiate = smb2_negotiate,
3502 .negotiate_wsize = smb2_negotiate_wsize,
3503 .negotiate_rsize = smb2_negotiate_rsize,
3504 .sess_setup = SMB2_sess_setup,
3505 .logoff = SMB2_logoff,
3506 .tree_connect = SMB2_tcon,
3507 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05003508 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003509 .is_path_accessible = smb2_is_path_accessible,
3510 .can_echo = smb2_can_echo,
3511 .echo = SMB2_echo,
3512 .query_path_info = smb2_query_path_info,
3513 .get_srv_inum = smb2_get_srv_inum,
3514 .query_file_info = smb2_query_file_info,
3515 .set_path_size = smb2_set_path_size,
3516 .set_file_size = smb2_set_file_size,
3517 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05003518 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003519 .mkdir = smb2_mkdir,
3520 .mkdir_setinfo = smb2_mkdir_setinfo,
3521 .rmdir = smb2_rmdir,
3522 .unlink = smb2_unlink,
3523 .rename = smb2_rename_path,
3524 .create_hardlink = smb2_create_hardlink,
3525 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01003526 .query_mf_symlink = smb3_query_mf_symlink,
3527 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003528 .open = smb2_open_file,
3529 .set_fid = smb2_set_fid,
3530 .close = smb2_close_file,
3531 .flush = smb2_flush_file,
3532 .async_readv = smb2_async_readv,
3533 .async_writev = smb2_async_writev,
3534 .sync_read = smb2_sync_read,
3535 .sync_write = smb2_sync_write,
3536 .query_dir_first = smb2_query_dir_first,
3537 .query_dir_next = smb2_query_dir_next,
3538 .close_dir = smb2_close_dir,
3539 .calc_smb_size = smb2_calc_size,
3540 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003541 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003542 .oplock_response = smb2_oplock_response,
3543 .queryfs = smb2_queryfs,
3544 .mand_lock = smb2_mand_lock,
3545 .mand_unlock_range = smb2_unlock_range,
3546 .push_mand_locks = smb2_push_mandatory_locks,
3547 .get_lease_key = smb2_get_lease_key,
3548 .set_lease_key = smb2_set_lease_key,
3549 .new_lease_key = smb2_new_lease_key,
3550 .calc_signature = smb2_calc_signature,
3551 .is_read_op = smb2_is_read_op,
3552 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003553 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003554 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003555 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003556 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04003557 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01003558 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05303559 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003560#ifdef CONFIG_CIFS_XATTR
3561 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10003562 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003563#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003564#ifdef CONFIG_CIFS_ACL
3565 .get_acl = get_smb2_acl,
3566 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003567 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003568#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003569 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003570 .ioctl_query_info = smb2_ioctl_query_info,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003571};
3572
Steve French1080ef72011-02-24 18:07:19 +00003573struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07003574 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04003575 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04003576 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04003577 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04003578 .add_credits = smb2_add_credits,
3579 .set_credits = smb2_set_credits,
3580 .get_credits_field = smb2_get_credits_field,
3581 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003582 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04003583 .get_next_mid = smb2_get_next_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003584 .read_data_offset = smb2_read_data_offset,
3585 .read_data_length = smb2_read_data_length,
3586 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04003587 .find_mid = smb2_find_mid,
3588 .check_message = smb2_check_message,
3589 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04003590 .clear_stats = smb2_clear_stats,
3591 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07003592 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003593 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003594 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04003595 .need_neg = smb2_need_neg,
3596 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07003597 .negotiate_wsize = smb2_negotiate_wsize,
3598 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04003599 .sess_setup = SMB2_sess_setup,
3600 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04003601 .tree_connect = SMB2_tcon,
3602 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05003603 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04003604 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04003605 .can_echo = smb2_can_echo,
3606 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04003607 .query_path_info = smb2_query_path_info,
3608 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07003609 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07003610 .set_path_size = smb2_set_path_size,
3611 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07003612 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05003613 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04003614 .mkdir = smb2_mkdir,
3615 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04003616 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07003617 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07003618 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07003619 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003620 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05003621 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05003622 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07003623 .open = smb2_open_file,
3624 .set_fid = smb2_set_fid,
3625 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07003626 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003627 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07003628 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07003629 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07003630 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07003631 .query_dir_first = smb2_query_dir_first,
3632 .query_dir_next = smb2_query_dir_next,
3633 .close_dir = smb2_close_dir,
3634 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07003635 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003636 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07003637 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07003638 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07003639 .mand_lock = smb2_mand_lock,
3640 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07003641 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07003642 .get_lease_key = smb2_get_lease_key,
3643 .set_lease_key = smb2_set_lease_key,
3644 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06003645 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003646 .is_read_op = smb21_is_read_op,
3647 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003648 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003649 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003650 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003651 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04003652 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05003653 .enum_snapshots = smb3_enum_snapshots,
Aurelien Aptel9d496402017-02-13 16:16:49 +01003654 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05303655 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003656#ifdef CONFIG_CIFS_XATTR
3657 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10003658 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003659#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003660#ifdef CONFIG_CIFS_ACL
3661 .get_acl = get_smb2_acl,
3662 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003663 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003664#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003665 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003666 .ioctl_query_info = smb2_ioctl_query_info,
Steve French38107d42012-12-08 22:08:06 -06003667};
3668
Steve French38107d42012-12-08 22:08:06 -06003669struct smb_version_operations smb30_operations = {
3670 .compare_fids = smb2_compare_fids,
3671 .setup_request = smb2_setup_request,
3672 .setup_async_request = smb2_setup_async_request,
3673 .check_receive = smb2_check_receive,
3674 .add_credits = smb2_add_credits,
3675 .set_credits = smb2_set_credits,
3676 .get_credits_field = smb2_get_credits_field,
3677 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003678 .wait_mtu_credits = smb2_wait_mtu_credits,
Steve French38107d42012-12-08 22:08:06 -06003679 .get_next_mid = smb2_get_next_mid,
3680 .read_data_offset = smb2_read_data_offset,
3681 .read_data_length = smb2_read_data_length,
3682 .map_error = map_smb2_to_linux_error,
3683 .find_mid = smb2_find_mid,
3684 .check_message = smb2_check_message,
3685 .dump_detail = smb2_dump_detail,
3686 .clear_stats = smb2_clear_stats,
3687 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05003688 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06003689 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003690 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003691 .downgrade_oplock = smb2_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06003692 .need_neg = smb2_need_neg,
3693 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05003694 .negotiate_wsize = smb3_negotiate_wsize,
3695 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06003696 .sess_setup = SMB2_sess_setup,
3697 .logoff = SMB2_logoff,
3698 .tree_connect = SMB2_tcon,
3699 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05003700 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06003701 .is_path_accessible = smb2_is_path_accessible,
3702 .can_echo = smb2_can_echo,
3703 .echo = SMB2_echo,
3704 .query_path_info = smb2_query_path_info,
3705 .get_srv_inum = smb2_get_srv_inum,
3706 .query_file_info = smb2_query_file_info,
3707 .set_path_size = smb2_set_path_size,
3708 .set_file_size = smb2_set_file_size,
3709 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05003710 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06003711 .mkdir = smb2_mkdir,
3712 .mkdir_setinfo = smb2_mkdir_setinfo,
3713 .rmdir = smb2_rmdir,
3714 .unlink = smb2_unlink,
3715 .rename = smb2_rename_path,
3716 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04003717 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05003718 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05003719 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06003720 .open = smb2_open_file,
3721 .set_fid = smb2_set_fid,
3722 .close = smb2_close_file,
3723 .flush = smb2_flush_file,
3724 .async_readv = smb2_async_readv,
3725 .async_writev = smb2_async_writev,
3726 .sync_read = smb2_sync_read,
3727 .sync_write = smb2_sync_write,
3728 .query_dir_first = smb2_query_dir_first,
3729 .query_dir_next = smb2_query_dir_next,
3730 .close_dir = smb2_close_dir,
3731 .calc_smb_size = smb2_calc_size,
3732 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003733 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06003734 .oplock_response = smb2_oplock_response,
3735 .queryfs = smb2_queryfs,
3736 .mand_lock = smb2_mand_lock,
3737 .mand_unlock_range = smb2_unlock_range,
3738 .push_mand_locks = smb2_push_mandatory_locks,
3739 .get_lease_key = smb2_get_lease_key,
3740 .set_lease_key = smb2_set_lease_key,
3741 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06003742 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06003743 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05003744 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003745 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003746 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003747 .create_lease_buf = smb3_create_lease_buf,
3748 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003749 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05003750 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06003751 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003752 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04003753 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05003754 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05003755 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003756 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003757 .is_transform_hdr = smb3_is_transform_hdr,
3758 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01003759 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05303760 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003761#ifdef CONFIG_CIFS_XATTR
3762 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10003763 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003764#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003765#ifdef CONFIG_CIFS_ACL
3766 .get_acl = get_smb2_acl,
3767 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003768 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003769#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003770 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003771 .ioctl_query_info = smb2_ioctl_query_info,
Steve French1080ef72011-02-24 18:07:19 +00003772};
3773
Steve Frenchaab18932015-06-23 23:37:11 -05003774struct smb_version_operations smb311_operations = {
3775 .compare_fids = smb2_compare_fids,
3776 .setup_request = smb2_setup_request,
3777 .setup_async_request = smb2_setup_async_request,
3778 .check_receive = smb2_check_receive,
3779 .add_credits = smb2_add_credits,
3780 .set_credits = smb2_set_credits,
3781 .get_credits_field = smb2_get_credits_field,
3782 .get_credits = smb2_get_credits,
3783 .wait_mtu_credits = smb2_wait_mtu_credits,
3784 .get_next_mid = smb2_get_next_mid,
3785 .read_data_offset = smb2_read_data_offset,
3786 .read_data_length = smb2_read_data_length,
3787 .map_error = map_smb2_to_linux_error,
3788 .find_mid = smb2_find_mid,
3789 .check_message = smb2_check_message,
3790 .dump_detail = smb2_dump_detail,
3791 .clear_stats = smb2_clear_stats,
3792 .print_stats = smb2_print_stats,
3793 .dump_share_caps = smb2_dump_share_caps,
3794 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003795 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05003796 .downgrade_oplock = smb2_downgrade_oplock,
3797 .need_neg = smb2_need_neg,
3798 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05003799 .negotiate_wsize = smb3_negotiate_wsize,
3800 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05003801 .sess_setup = SMB2_sess_setup,
3802 .logoff = SMB2_logoff,
3803 .tree_connect = SMB2_tcon,
3804 .tree_disconnect = SMB2_tdis,
3805 .qfs_tcon = smb3_qfs_tcon,
3806 .is_path_accessible = smb2_is_path_accessible,
3807 .can_echo = smb2_can_echo,
3808 .echo = SMB2_echo,
3809 .query_path_info = smb2_query_path_info,
3810 .get_srv_inum = smb2_get_srv_inum,
3811 .query_file_info = smb2_query_file_info,
3812 .set_path_size = smb2_set_path_size,
3813 .set_file_size = smb2_set_file_size,
3814 .set_file_info = smb2_set_file_info,
3815 .set_compression = smb2_set_compression,
3816 .mkdir = smb2_mkdir,
3817 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05003818 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05003819 .rmdir = smb2_rmdir,
3820 .unlink = smb2_unlink,
3821 .rename = smb2_rename_path,
3822 .create_hardlink = smb2_create_hardlink,
3823 .query_symlink = smb2_query_symlink,
3824 .query_mf_symlink = smb3_query_mf_symlink,
3825 .create_mf_symlink = smb3_create_mf_symlink,
3826 .open = smb2_open_file,
3827 .set_fid = smb2_set_fid,
3828 .close = smb2_close_file,
3829 .flush = smb2_flush_file,
3830 .async_readv = smb2_async_readv,
3831 .async_writev = smb2_async_writev,
3832 .sync_read = smb2_sync_read,
3833 .sync_write = smb2_sync_write,
3834 .query_dir_first = smb2_query_dir_first,
3835 .query_dir_next = smb2_query_dir_next,
3836 .close_dir = smb2_close_dir,
3837 .calc_smb_size = smb2_calc_size,
3838 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003839 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05003840 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05003841 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05003842 .mand_lock = smb2_mand_lock,
3843 .mand_unlock_range = smb2_unlock_range,
3844 .push_mand_locks = smb2_push_mandatory_locks,
3845 .get_lease_key = smb2_get_lease_key,
3846 .set_lease_key = smb2_set_lease_key,
3847 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06003848 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05003849 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05003850 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05003851 .is_read_op = smb21_is_read_op,
3852 .set_oplock_level = smb3_set_oplock_level,
3853 .create_lease_buf = smb3_create_lease_buf,
3854 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05003855 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07003856 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05003857/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
3858 .wp_retry_size = smb2_wp_retry_size,
3859 .dir_needs_close = smb2_dir_needs_close,
3860 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05003861 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003862 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003863 .is_transform_hdr = smb3_is_transform_hdr,
3864 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01003865 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05303866 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003867#ifdef CONFIG_CIFS_XATTR
3868 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10003869 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10003870#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10003871#ifdef CONFIG_CIFS_ACL
3872 .get_acl = get_smb2_acl,
3873 .get_acl_by_fid = get_smb2_acl_by_fid,
3874 .set_acl = set_smb2_acl,
3875#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003876 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05003877 .ioctl_query_info = smb2_ioctl_query_info,
Steve Frenchaab18932015-06-23 23:37:11 -05003878};
Steve Frenchaab18932015-06-23 23:37:11 -05003879
Steve Frenchdd446b12012-11-28 23:21:06 -06003880struct smb_version_values smb20_values = {
3881 .version_string = SMB20_VERSION_STRING,
3882 .protocol_id = SMB20_PROT_ID,
3883 .req_capabilities = 0, /* MBZ */
3884 .large_lock_type = 0,
3885 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3886 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3887 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003888 .header_size = sizeof(struct smb2_sync_hdr),
3889 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06003890 .max_header_size = MAX_SMB2_HDR_SIZE,
3891 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
3892 .lock_cmd = SMB2_LOCK,
3893 .cap_unix = 0,
3894 .cap_nt_find = SMB2_NT_FIND,
3895 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04003896 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
3897 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003898 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06003899};
3900
Steve French1080ef72011-02-24 18:07:19 +00003901struct smb_version_values smb21_values = {
3902 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05003903 .protocol_id = SMB21_PROT_ID,
3904 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
3905 .large_lock_type = 0,
3906 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3907 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3908 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003909 .header_size = sizeof(struct smb2_sync_hdr),
3910 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05003911 .max_header_size = MAX_SMB2_HDR_SIZE,
3912 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
3913 .lock_cmd = SMB2_LOCK,
3914 .cap_unix = 0,
3915 .cap_nt_find = SMB2_NT_FIND,
3916 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04003917 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
3918 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003919 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05003920};
3921
Steve French9764c022017-09-17 10:41:35 -05003922struct smb_version_values smb3any_values = {
3923 .version_string = SMB3ANY_VERSION_STRING,
3924 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05003925 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05003926 .large_lock_type = 0,
3927 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3928 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3929 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003930 .header_size = sizeof(struct smb2_sync_hdr),
3931 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05003932 .max_header_size = MAX_SMB2_HDR_SIZE,
3933 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
3934 .lock_cmd = SMB2_LOCK,
3935 .cap_unix = 0,
3936 .cap_nt_find = SMB2_NT_FIND,
3937 .cap_large_files = SMB2_LARGE_FILES,
3938 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
3939 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
3940 .create_lease_size = sizeof(struct create_lease_v2),
3941};
3942
3943struct smb_version_values smbdefault_values = {
3944 .version_string = SMBDEFAULT_VERSION_STRING,
3945 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05003946 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05003947 .large_lock_type = 0,
3948 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3949 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3950 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003951 .header_size = sizeof(struct smb2_sync_hdr),
3952 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05003953 .max_header_size = MAX_SMB2_HDR_SIZE,
3954 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
3955 .lock_cmd = SMB2_LOCK,
3956 .cap_unix = 0,
3957 .cap_nt_find = SMB2_NT_FIND,
3958 .cap_large_files = SMB2_LARGE_FILES,
3959 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
3960 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
3961 .create_lease_size = sizeof(struct create_lease_v2),
3962};
3963
Steve Frenche4aa25e2012-10-01 12:26:22 -05003964struct smb_version_values smb30_values = {
3965 .version_string = SMB30_VERSION_STRING,
3966 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05003967 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07003968 .large_lock_type = 0,
3969 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3970 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3971 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003972 .header_size = sizeof(struct smb2_sync_hdr),
3973 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04003974 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003975 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04003976 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003977 .cap_unix = 0,
3978 .cap_nt_find = SMB2_NT_FIND,
3979 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04003980 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
3981 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003982 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00003983};
Steve French20b6d8b2013-06-12 22:48:41 -05003984
3985struct smb_version_values smb302_values = {
3986 .version_string = SMB302_VERSION_STRING,
3987 .protocol_id = SMB302_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05003988 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French20b6d8b2013-06-12 22:48:41 -05003989 .large_lock_type = 0,
3990 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3991 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3992 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003993 .header_size = sizeof(struct smb2_sync_hdr),
3994 .header_preamble_size = 0,
Steve French20b6d8b2013-06-12 22:48:41 -05003995 .max_header_size = MAX_SMB2_HDR_SIZE,
3996 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
3997 .lock_cmd = SMB2_LOCK,
3998 .cap_unix = 0,
3999 .cap_nt_find = SMB2_NT_FIND,
4000 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004001 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4002 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004003 .create_lease_size = sizeof(struct create_lease_v2),
Steve French20b6d8b2013-06-12 22:48:41 -05004004};
Steve French5f7fbf72014-12-17 22:52:58 -06004005
Steve French5f7fbf72014-12-17 22:52:58 -06004006struct smb_version_values smb311_values = {
4007 .version_string = SMB311_VERSION_STRING,
4008 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004009 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06004010 .large_lock_type = 0,
4011 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4012 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4013 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004014 .header_size = sizeof(struct smb2_sync_hdr),
4015 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06004016 .max_header_size = MAX_SMB2_HDR_SIZE,
4017 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4018 .lock_cmd = SMB2_LOCK,
4019 .cap_unix = 0,
4020 .cap_nt_find = SMB2_NT_FIND,
4021 .cap_large_files = SMB2_LARGE_FILES,
4022 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4023 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4024 .create_lease_size = sizeof(struct create_lease_v2),
4025};