// SPDX-License-Identifier: GPL-2.0
/*
 * SMB2 version specific operations
 *
 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2 as published
 * by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pagemap.h>
#include <linux/vfs.h>
#include <linux/falloc.h>
#include <linux/scatterlist.h>
#include <linux/uuid.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
#include "smbdirect.h"

/* Change credits for different ops and return the total number of credits */
static int
change_conf(struct TCP_Server_Info *server)
{
	server->credits += server->echo_credits + server->oplock_credits;
	server->oplock_credits = server->echo_credits = 0;
	switch (server->credits) {
	case 0:
		return 0;
	case 1:
		server->echoes = false;
		server->oplocks = false;
		break;
	case 2:
		server->echoes = true;
		server->oplocks = false;
		server->echo_credits = 1;
		break;
	default:
		server->echoes = true;
		if (enable_oplocks) {
			server->oplocks = true;
			server->oplock_credits = 1;
		} else
			server->oplocks = false;

		server->echo_credits = 1;
	}
	server->credits -= server->echo_credits + server->oplock_credits;
	return server->credits + server->echo_credits + server->oplock_credits;
}

static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* e.g. found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			 add, instance);

	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}

static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
	spin_lock(&server->req_lock);
	server->credits = val;
	if (val == 1)
		server->reconnect_instance++;
	spin_unlock(&server->req_lock);
	/* don't log while holding the lock */
	if (val == 1)
		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}

static int *
smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
	switch (optype) {
	case CIFS_ECHO_OP:
		return &server->echo_credits;
	case CIFS_OBREAK_OP:
		return &server->oplock_credits;
	default:
		return &server->credits;
	}
}

static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;

	if (mid->mid_state == MID_RESPONSE_RECEIVED
	    || mid->mid_state == MID_RESPONSE_MALFORMED)
		return le16_to_cpu(shdr->CreditRequest);

	return 0;
}

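/*
 * Wait for at least one credit, then carve out enough credits for a
 * multi-credit (large MTU) read or write of up to @size bytes, keeping a
 * small reserve (8 credits) for reopen and other operations.
 */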
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
				has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}

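/*
 * Shrink the credits reserved for a request down to what the actual payload
 * needs and return the excess to the server's pool, unless the connection
 * was reconnected in the meantime.
 */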
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		WARN_ONCE(1, "request has less credits (%d) than required (%d)",
			  credits->value, new_val);
		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		spin_unlock(&server->req_lock);
		cifs_dbg(VFS, "trying to return %d credits to old session\n",
			 credits->value - new_val);
		return -EAGAIN;
	}

	server->credits += credits->value - new_val;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	credits->value = new_val;
	return 0;
}

static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
	__u64 mid;
	/* for SMB2 we need the current value */
	spin_lock(&GlobalMid_Lock);
	mid = server->CurrentMid++;
	spin_unlock(&GlobalMid_Lock);
	return mid;
}

static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
	spin_lock(&GlobalMid_Lock);
	if (server->CurrentMid >= val)
		server->CurrentMid -= val;
	spin_unlock(&GlobalMid_Lock);
}

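/*
 * Match a received buffer to a pending mid by MessageId, state and command;
 * encrypted (transform) frames are not handled here.
 */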
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}

static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}

static bool
smb2_need_neg(struct TCP_Server_Info *server)
{
	return server->max_read == 0;
}

static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;

	ses->server->CurrentMid = 0;
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}

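/*
 * The following helpers pick the largest read/write I/O size allowed by the
 * mount options, the server's advertised limits and (for smbdirect) the RDMA
 * connection, capping at SMB2_MAX_BUFFER_SIZE when large MTU is unsupported.
 */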
static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}

static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}

static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}

static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}

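/*
 * Parse an FSCTL_QUERY_NETWORK_INTERFACE_INFO response into an array of
 * cifs_server_iface entries: the first pass counts and sanity checks the
 * entries, the second copies speed, capabilities and the IPv4/IPv6 address
 * of each interface.
 */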
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
			size_t buf_len,
			struct cifs_server_iface **iface_list,
			size_t *iface_count)
{
	struct network_interface_info_ioctl_rsp *p;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	struct iface_info_ipv4 *p4;
	struct iface_info_ipv6 *p6;
	struct cifs_server_iface *info;
	ssize_t bytes_left;
	size_t next = 0;
	int nb_iface = 0;
	int rc = 0;

	*iface_list = NULL;
	*iface_count = 0;

	/*
	 * First pass: count and sanity check
	 */

	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		nb_iface++;
		next = le32_to_cpu(p->Next);
		if (!next) {
			bytes_left -= sizeof(*p);
			break;
		}
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!nb_iface) {
		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	if (bytes_left || p->Next)
		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);


	/*
	 * Second pass: extract info to internal structure
	 */

	*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
	if (!*iface_list) {
		rc = -ENOMEM;
		goto out;
	}

	info = *iface_list;
	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		info->speed = le64_to_cpu(p->LinkSpeed);
		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);

		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
			 le32_to_cpu(p->Capability));

		switch (p->Family) {
		/*
		 * The kernel and wire socket structures have the same
		 * layout and use network byte order but make the
		 * conversion explicit in case either one changes.
		 */
		case INTERNETWORK:
			addr4 = (struct sockaddr_in *)&info->sockaddr;
			p4 = (struct iface_info_ipv4 *)p->Buffer;
			addr4->sin_family = AF_INET;
			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
			addr4->sin_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
				 &addr4->sin_addr);
			break;
		case INTERNETWORKV6:
			addr6 = (struct sockaddr_in6 *)&info->sockaddr;
			p6 = (struct iface_info_ipv6 *)p->Buffer;
			addr6->sin6_family = AF_INET6;
			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
			addr6->sin6_flowinfo = 0;
			addr6->sin6_scope_id = 0;
			addr6->sin6_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
				 &addr6->sin6_addr);
			break;
		default:
			cifs_dbg(VFS,
				 "%s: skipping unsupported socket family\n",
				 __func__);
			goto next_iface;
		}

		(*iface_count)++;
		info++;
next_iface:
		next = le32_to_cpu(p->Next);
		if (!next)
			break;
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!*iface_count) {
		rc = -EINVAL;
		goto out;
	}

out:
	if (rc) {
		kfree(*iface_list);
		*iface_count = 0;
		*iface_list = NULL;
	}
	return rc;
}


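/*
 * Query the server's network interfaces and cache the parsed list in the
 * session structure.
 */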
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}

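/*
 * Helpers for the cached root directory handle (tcon->crfid): the handle is
 * closed when the last reference is dropped or a lease break arrives.
 */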
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
	}
}

void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}

void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
			struct cached_fid, lease_break);

	close_shroot(cfid);
}

/*
 * Open the directory at the root of a share
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = 0;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_exit;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc)
		goto oshr_exit;

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		kref_get(&tcon->crfid.refcount);
		oplock = smb2_parse_lease_state(server, o_rsp,
						&oparms.fid->epoch,
						oparms.fid->lease_key);
	} else
		goto oshr_exit;

	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = 1;

 oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}

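/*
 * At tree connect time, open the share root (using the cached handle when
 * available), request the server's interface list and query filesystem
 * attribute/device/volume/sector-size information.
 */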
static void
smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	bool no_cached_open = tcon->nohandlecache;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (no_cached_open)
		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
			       NULL);
	else
		rc = open_shroot(xid, tcon, &fid);

	if (rc)
		return;

	SMB3_request_interfaces(xid, tcon);

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_VOLUME_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
	if (no_cached_open)
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	else
		close_shroot(&tcon->crfid);
}

static void
smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
	if (rc)
		return;

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}

static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, const char *full_path)
{
	int rc;
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	if ((*full_path == 0) && tcon->crfid.is_valid)
		return 0;

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	if (rc) {
		kfree(utf16_path);
		return rc;
	}

	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	kfree(utf16_path);
	return rc;
}

static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}

static int
smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
		     struct cifs_fid *fid, FILE_ALL_INFO *data)
{
	int rc;
	struct smb2_file_all_info *smb2_data;

	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			    GFP_KERNEL);
	if (smb2_data == NULL)
		return -ENOMEM;

	rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
			     smb2_data);
	if (!rc)
		move_smb2_info_to_cifs(data, smb2_data);
	kfree(smb2_data);
	return rc;
}

#ifdef CONFIG_CIFS_XATTR
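/*
 * Copy SMB2 FILE_FULL_EA_INFORMATION entries into Linux xattr form: either
 * the value of the single EA named by @ea_name, or (when @ea_name is NULL)
 * a list of all EA names prefixed with "user.". Returns the length copied,
 * or the length required when the destination size is 0.
 */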
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}

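/*
 * Retrieve extended attributes for @path with a compound
 * open/query-info/close request and convert them via move_smb2_ea_to_cifs().
 */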
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
				  le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}


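/*
 * Set an extended attribute (a NULL value removes it) using a single
 * compound open/set-info/close request; removal of a nonexistent EA is
 * caught up front via query_all_EAs.
 */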
static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *path, const char *ea_name, const void *ea_value,
	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
	    struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	__le16 *utf16_path = NULL;
	int ea_name_len = strlen(ea_name);
	int flags = 0;
	int len;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	unsigned int size[1];
	void *data[1];
	struct smb2_file_full_ea_info *ea = NULL;
	struct kvec close_iov[1];
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (ea_name_len > 255)
		return -EINVAL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (ses->server->ops->query_all_EAs) {
		if (!ea_value) {
			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
							     ea_name, NULL, 0,
							     cifs_sb);
			if (rc == -ENODATA)
				goto sea_exit;
		}
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_WRITE_EA;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* Set Info */
	memset(&si_iov, 0, sizeof(si_iov));
	rqst[1].rq_iov = si_iov;
	rqst[1].rq_nvec = 1;

	len = sizeof(ea) + ea_name_len + ea_value_len + 1;
	ea = kzalloc(len, GFP_KERNEL);
	if (ea == NULL) {
		rc = -ENOMEM;
		goto sea_exit;
	}

	ea->ea_name_length = ea_name_len;
	ea->ea_value_length = cpu_to_le16(ea_value_len);
	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

	size[0] = len;
	data[0] = ea;

	rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
				COMPOUND_FID, current->tgid,
				FILE_FULL_EA_INFORMATION,
				SMB2_O_INFO_FILE, 0, data, size);
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;
	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);

 sea_exit:
	kfree(ea);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_set_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
#endif

static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}

static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
	int i;

	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
	}
}

static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}

static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;

	/*
	 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
	 * totals (requests sent) since those SMBs are per-session not per tcon
	 */
	seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
		   (long long)(tcon->bytes_read),
		   (long long)(tcon->bytes_written));
	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
		   atomic_read(&tcon->num_local_opens),
		   atomic_read(&tcon->num_remote_opens));
	seq_printf(m, "\nTreeConnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
	seq_printf(m, "\nCreates: %d total %d failed",
		   atomic_read(&sent[SMB2_CREATE_HE]),
		   atomic_read(&failed[SMB2_CREATE_HE]));
	seq_printf(m, "\nCloses: %d total %d failed",
		   atomic_read(&sent[SMB2_CLOSE_HE]),
		   atomic_read(&failed[SMB2_CLOSE_HE]));
	seq_printf(m, "\nFlushes: %d total %d failed",
		   atomic_read(&sent[SMB2_FLUSH_HE]),
		   atomic_read(&failed[SMB2_FLUSH_HE]));
	seq_printf(m, "\nReads: %d total %d failed",
		   atomic_read(&sent[SMB2_READ_HE]),
		   atomic_read(&failed[SMB2_READ_HE]));
	seq_printf(m, "\nWrites: %d total %d failed",
		   atomic_read(&sent[SMB2_WRITE_HE]),
		   atomic_read(&failed[SMB2_WRITE_HE]));
	seq_printf(m, "\nLocks: %d total %d failed",
		   atomic_read(&sent[SMB2_LOCK_HE]),
		   atomic_read(&failed[SMB2_LOCK_HE]));
	seq_printf(m, "\nIOCTLs: %d total %d failed",
		   atomic_read(&sent[SMB2_IOCTL_HE]),
		   atomic_read(&failed[SMB2_IOCTL_HE]));
	seq_printf(m, "\nQueryDirectories: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
	seq_printf(m, "\nChangeNotifies: %d total %d failed",
		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
	seq_printf(m, "\nQueryInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
	seq_printf(m, "\nSetInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_SET_INFO_HE]),
		   atomic_read(&failed[SMB2_SET_INFO_HE]));
	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
}

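/*
 * Record the server-assigned file ids in the open file and apply the granted
 * oplock/lease state to the inode's caching flags.
 */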
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}

static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}

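/*
 * Obtain a resume key via FSCTL_SRV_REQUEST_RESUME_KEY, needed to identify
 * the source file in server-side copy (copychunk) requests.
 */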
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}

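/*
 * Handle the passthrough query-info ioctl: build a compound open /
 * query-info (or passthrough fsctl) / close request from the user-supplied
 * smb_query_info and copy the result back to userspace.
 */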
static int
smb2_ioctl_query_info(const unsigned int xid,
		      struct cifs_tcon *tcon,
		      __le16 *path, int is_dir,
		      unsigned long p)
{
	struct cifs_ses *ses = tcon->ses;
	char __user *arg = (char __user *)p;
	struct smb_query_info qi;
	struct smb_query_info __user *pqi;
	int rc = 0;
	int flags = 0;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	struct smb2_ioctl_rsp *io_rsp = NULL;
	void *buffer = NULL;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec qi_iov[1];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
		return -EFAULT;

	if (qi.output_buffer_length > 1024)
		return -EINVAL;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
			   qi.output_buffer_length)) {
		rc = -EFAULT;
		goto iqinf_exit;
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
	oparms.disposition = FILE_OPEN;
	if (is_dir)
		oparms.create_options = CREATE_NOT_FILE;
	else
		oparms.create_options = CREATE_NOT_DIR;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/*
	 * FSCTL codes encode the special access they need in the fsctl code.
	 */
	if (qi.flags & PASSTHRU_FSCTL) {
		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
			oparms.desired_access = GENERIC_ALL;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
			oparms.desired_access = GENERIC_READ;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
			oparms.desired_access = GENERIC_WRITE;
			break;
		}
	}

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query */
	if (qi.flags & PASSTHRU_FSCTL) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN))
			rc = -EPERM;
		else {
			memset(&io_iov, 0, sizeof(io_iov));
			rqst[1].rq_iov = io_iov;
			rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

			rc = SMB2_ioctl_init(tcon, &rqst[1],
					     COMPOUND_FID, COMPOUND_FID,
					     qi.info_type, true, buffer,
					     qi.output_buffer_length,
					     CIFSMaxBufSize);
		}
	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
		memset(&qi_iov, 0, sizeof(qi_iov));
		rqst[1].rq_iov = qi_iov;
		rqst[1].rq_nvec = 1;

		rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
					  COMPOUND_FID, qi.file_info_class,
					  qi.info_type, qi.additional_information,
					  qi.input_buffer_length,
					  qi.output_buffer_length, buffer);
	} else { /* unknown flags */
		cifs_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
		rc = -EINVAL;
	}

	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto iqinf_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc)
		goto iqinf_exit;
	if (qi.flags & PASSTHRU_FSCTL) {
		pqi = (struct smb_query_info __user *)arg;
		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
		if (qi.input_buffer_length > 0 &&
		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
		if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
1470 sizeof(qi.input_buffer_length))) {
1471 rc = -EFAULT;
1472 goto iqinf_exit;
1473 }
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001474 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1475 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
1476 qi.input_buffer_length)) {
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001477 rc = -EFAULT;
1478 goto iqinf_exit;
1479 }
1480 } else {
1481 pqi = (struct smb_query_info __user *)arg;
1482 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1483 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1484 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
1485 if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
1486 sizeof(qi.input_buffer_length))) {
1487 rc = -EFAULT;
1488 goto iqinf_exit;
1489 }
1490 if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
1491 rc = -EFAULT;
1492 goto iqinf_exit;
1493 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001494 }
1495
1496 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001497 kfree(buffer);
1498 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001499 if (qi.flags & PASSTHRU_FSCTL)
1500 SMB2_ioctl_free(&rqst[1]);
1501 else
1502 SMB2_query_info_free(&rqst[1]);
1503
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001504 SMB2_close_free(&rqst[2]);
1505 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1506 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1507 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001508 return rc;
1509}
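/*
 * Rough sketch of the user space side of the passthrough query above
 * (assumes the CIFS_QUERY_INFO ioctl and the struct smb_query_info layout
 * from cifs_ioctl.h; illustrative only, not a definitive example):
 *
 *	struct smb_query_info *qi = calloc(1, sizeof(*qi) + out_len);
 *
 *	qi->info_type = 0x01;            (SMB2_O_INFO_FILE)
 *	qi->file_info_class = 18;        (FileAllInformation)
 *	qi->output_buffer_length = out_len;
 *	ioctl(fd, CIFS_QUERY_INFO, qi);
 *
 * On success the response bytes are copied in just after the struct and
 * input_buffer_length is updated with the length actually returned.
 */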
1510
Sachin Prabhu620d8742017-02-10 16:03:51 +05301511static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001512smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001513 struct cifsFileInfo *srcfile,
1514 struct cifsFileInfo *trgtfile, u64 src_off,
1515 u64 len, u64 dest_off)
1516{
1517 int rc;
1518 unsigned int ret_data_len;
1519 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001520 struct copychunk_ioctl_rsp *retbuf = NULL;
1521 struct cifs_tcon *tcon;
1522 int chunks_copied = 0;
1523 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301524 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001525
1526 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1527
1528 if (pcchunk == NULL)
1529 return -ENOMEM;
1530
Christoph Probsta205d502019-05-08 21:36:25 +02001531 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001532 /* Request a key from the server to identify the source of the copy */
1533 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1534 srcfile->fid.persistent_fid,
1535 srcfile->fid.volatile_fid, pcchunk);
1536
1537 /* Note: request_res_key sets res_key null only if rc != 0 */
1538 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001539 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001540
1541 /* For now the array is only one chunk long; will make it more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001542 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001543 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001544 pcchunk->Reserved2 = 0;
1545
Steve French9bf0c9c2013-11-16 18:05:28 -06001546 tcon = tlink_tcon(trgtfile->tlink);
1547
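	/*
	 * Issue one FSCTL_SRV_COPYCHUNK_WRITE per iteration: the server copies
	 * up to tcon->max_bytes_chunk from the source (named by the resume key
	 * above) into the target at dest_off, and the response reports how
	 * much was actually written so the offsets can be advanced.
	 */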
1548 while (len > 0) {
1549 pcchunk->SourceOffset = cpu_to_le64(src_off);
1550 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1551 pcchunk->Length =
1552 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1553
1554 /* Request server copy to target from src identified by key */
1555 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001556 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001557 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001558 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1559 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001560 if (rc == 0) {
1561 if (ret_data_len !=
1562 sizeof(struct copychunk_ioctl_rsp)) {
1563 cifs_dbg(VFS, "invalid cchunk response size\n");
1564 rc = -EIO;
1565 goto cchunk_out;
1566 }
1567 if (retbuf->TotalBytesWritten == 0) {
1568 cifs_dbg(FYI, "no bytes copied\n");
1569 rc = -EIO;
1570 goto cchunk_out;
1571 }
1572 /*
1573 * Check if server claimed to write more than we asked
1574 */
1575 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1576 le32_to_cpu(pcchunk->Length)) {
1577 cifs_dbg(VFS, "invalid copy chunk response\n");
1578 rc = -EIO;
1579 goto cchunk_out;
1580 }
1581 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
1582 cifs_dbg(VFS, "invalid num chunks written\n");
1583 rc = -EIO;
1584 goto cchunk_out;
1585 }
1586 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001587
Sachin Prabhu620d8742017-02-10 16:03:51 +05301588 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1589 src_off += bytes_written;
1590 dest_off += bytes_written;
1591 len -= bytes_written;
1592 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001593
Sachin Prabhu620d8742017-02-10 16:03:51 +05301594 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001595 le32_to_cpu(retbuf->ChunksWritten),
1596 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301597 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001598 } else if (rc == -EINVAL) {
1599 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1600 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001601
Steve French9bf0c9c2013-11-16 18:05:28 -06001602 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1603 le32_to_cpu(retbuf->ChunksWritten),
1604 le32_to_cpu(retbuf->ChunkBytesWritten),
1605 le32_to_cpu(retbuf->TotalBytesWritten));
1606
1607 /*
1608 * Check if this is the first request using these sizes,
1609 * (ie check if copy succeeded once with original sizes
1610 * and check if the server gave us different sizes after
1611 * we already updated max sizes on previous request).
1612 * If not, why is the server returning an error now?
1613 */
1614 if ((chunks_copied != 0) || chunk_sizes_updated)
1615 goto cchunk_out;
1616
1617 /* Check that server is not asking us to grow size */
1618 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1619 tcon->max_bytes_chunk)
1620 tcon->max_bytes_chunk =
1621 le32_to_cpu(retbuf->ChunkBytesWritten);
1622 else
1623 goto cchunk_out; /* server gave us bogus size */
1624
1625 /* No need to change MaxChunks since already set to 1 */
1626 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001627 } else
1628 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001629 }
1630
1631cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001632 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001633 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301634 if (rc)
1635 return rc;
1636 else
1637 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001638}
1639
1640static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001641smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1642 struct cifs_fid *fid)
1643{
1644 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1645}
1646
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001647static unsigned int
1648smb2_read_data_offset(char *buf)
1649{
1650 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001651
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001652 return rsp->DataOffset;
1653}
1654
1655static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001656smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001657{
1658 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001659
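	/*
	 * in_remaining is expected to be set when the payload was delivered
	 * out of band (e.g. placed directly by an RDMA/smbdirect read), in
	 * which case the length of interest is DataRemaining, not DataLength.
	 */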
1660 if (in_remaining)
1661 return le32_to_cpu(rsp->DataRemaining);
1662
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001663 return le32_to_cpu(rsp->DataLength);
1664}
1665
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001666
1667static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001668smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001669 struct cifs_io_parms *parms, unsigned int *bytes_read,
1670 char **buf, int *buf_type)
1671{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001672 parms->persistent_fid = pfid->persistent_fid;
1673 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001674 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1675}
1676
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001677static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001678smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001679 struct cifs_io_parms *parms, unsigned int *written,
1680 struct kvec *iov, unsigned long nr_segs)
1681{
1682
Steve Frenchdb8b6312014-09-22 05:13:55 -05001683 parms->persistent_fid = pfid->persistent_fid;
1684 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001685 return SMB2_write(xid, parms, written, iov, nr_segs);
1686}
1687
Steve Frenchd43cc792014-08-13 17:16:29 -05001688/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1689static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1690 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1691{
1692 struct cifsInodeInfo *cifsi;
1693 int rc;
1694
1695 cifsi = CIFS_I(inode);
1696
1697 /* if file already sparse don't bother setting sparse again */
1698 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1699 return true; /* already sparse */
1700
1701 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1702 return true; /* already not sparse */
1703
1704 /*
1705 * Can't check for sparse support on share the usual way via the
1706 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1707 * since Samba server doesn't set the flag on the share, yet
1708 * supports the set sparse FSCTL and returns sparse correctly
1709 * in the file attributes. If we fail setting sparse though we
1710 * mark that server does not support sparse files for this share
1711 * to avoid repeatedly sending the unsupported fsctl to server
1712 * if the file is repeatedly extended.
1713 */
1714 if (tcon->broken_sparse_sup)
1715 return false;
1716
1717 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1718 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001719 true /* is_fctl */,
Steve French153322f2019-03-28 22:32:49 -05001720 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001721 if (rc) {
1722 tcon->broken_sparse_sup = true;
1723 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1724 return false;
1725 }
1726
1727 if (setsparse)
1728 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1729 else
1730 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1731
1732 return true;
1733}
1734
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001735static int
1736smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1737 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1738{
1739 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001740 struct inode *inode;
1741
1742 /*
1743 * If extending the file by more than one page, make it sparse. Many Linux
1744 * filesystems make files sparse by default when extending via ftruncate.
1745 */
David Howells2b0143b2015-03-17 22:25:59 +00001746 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001747
1748 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001749 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001750
Steve Frenchd43cc792014-08-13 17:16:29 -05001751 /* whether set sparse succeeds or not, extend the file */
1752 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001753 }
1754
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001755 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001756 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001757}
1758
Steve French02b16662015-06-27 21:18:36 -07001759static int
1760smb2_duplicate_extents(const unsigned int xid,
1761 struct cifsFileInfo *srcfile,
1762 struct cifsFileInfo *trgtfile, u64 src_off,
1763 u64 len, u64 dest_off)
1764{
1765 int rc;
1766 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001767 struct duplicate_extents_to_file dup_ext_buf;
1768 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1769
1770 /* server fileays advertise duplicate extent support with this flag */
1771 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1772 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1773 return -EOPNOTSUPP;
1774
1775 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1776 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1777 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1778 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1779 dup_ext_buf.ByteCount = cpu_to_le64(len);
Christoph Probsta205d502019-05-08 21:36:25 +02001780 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
Steve French02b16662015-06-27 21:18:36 -07001781 src_off, dest_off, len);
1782
1783 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1784 if (rc)
1785 goto duplicate_extents_out;
1786
1787 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1788 trgtfile->fid.volatile_fid,
1789 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001790 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001791 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001792 sizeof(struct duplicate_extents_to_file),
Steve French153322f2019-03-28 22:32:49 -05001793 CIFSMaxBufSize, NULL,
Steve French02b16662015-06-27 21:18:36 -07001794 &ret_data_len);
1795
1796 if (ret_data_len > 0)
Christoph Probsta205d502019-05-08 21:36:25 +02001797 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
Steve French02b16662015-06-27 21:18:36 -07001798
1799duplicate_extents_out:
1800 return rc;
1801}
Steve French02b16662015-06-27 21:18:36 -07001802
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001803static int
Steve French64a5cfa2013-10-14 15:31:32 -05001804smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1805 struct cifsFileInfo *cfile)
1806{
1807 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1808 cfile->fid.volatile_fid);
1809}
1810
1811static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001812smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1813 struct cifsFileInfo *cfile)
1814{
1815 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001816 unsigned int ret_data_len;
1817
1818 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1819 integr_info.Flags = 0;
1820 integr_info.Reserved = 0;
1821
1822 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1823 cfile->fid.volatile_fid,
1824 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001825 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001826 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001827 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001828 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001829 &ret_data_len);
1830
1831}
1832
Steve Frenche02789a2018-08-09 14:33:12 -05001833/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode (48 bytes) + 2 byte null terminator */
1834#define GMT_TOKEN_SIZE 50
1835
Steve French153322f2019-03-28 22:32:49 -05001836#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1837
Steve Frenche02789a2018-08-09 14:33:12 -05001838/*
1839 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1840 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1841 */
Steve Frenchb3152e22015-06-24 03:17:02 -05001842static int
Steve French834170c2016-09-30 21:14:26 -05001843smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1844 struct cifsFileInfo *cfile, void __user *ioc_buf)
1845{
1846 char *retbuf = NULL;
1847 unsigned int ret_data_len = 0;
1848 int rc;
Steve French153322f2019-03-28 22:32:49 -05001849 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05001850 struct smb_snapshot_array snapshot_in;
1851
Steve French973189a2019-04-04 00:41:04 -05001852 /*
1853 * On the first query to enumerate the list of snapshots available
1854 * for this volume the buffer begins with 0 (number of snapshots
1855 * which can be returned is zero since at that point we do not know
1856 * how big the buffer needs to be). On the second query,
1857 * it (ret_data_len) is set to number of snapshots so we can
1858 * know to set the maximum response size larger (see below).
1859 */
Steve French153322f2019-03-28 22:32:49 -05001860 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
1861 return -EFAULT;
1862
1863 /*
1864 * Note that for snapshot queries that servers like Azure expect that
1865 * the first query be minimal size (and just used to get the number/size
1866 * of previous versions) so response size must be specified as EXACTLY
1867 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
1868 * of eight bytes.
1869 */
1870 if (ret_data_len == 0)
1871 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
1872 else
1873 max_response_size = CIFSMaxBufSize;
1874
Steve French834170c2016-09-30 21:14:26 -05001875 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1876 cfile->fid.volatile_fid,
1877 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001878 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001879 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05001880 (char **)&retbuf,
1881 &ret_data_len);
1882 cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
1883 rc, ret_data_len);
1884 if (rc)
1885 return rc;
1886
1887 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
1888 /* Fixup buffer */
1889 if (copy_from_user(&snapshot_in, ioc_buf,
1890 sizeof(struct smb_snapshot_array))) {
1891 rc = -EFAULT;
1892 kfree(retbuf);
1893 return rc;
1894 }
Steve French834170c2016-09-30 21:14:26 -05001895
Steve Frenche02789a2018-08-09 14:33:12 -05001896 /*
1897 * Check for min size, ie not large enough to fit even one GMT
1898 * token (snapshot). On the first ioctl some users may pass in
1899 * smaller size (or zero) to simply get the size of the array
1900 * so the user space caller can allocate sufficient memory
1901 * and retry the ioctl again with larger array size sufficient
1902 * to hold all of the snapshot GMT tokens on the second try.
1903 */
1904 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
1905 ret_data_len = sizeof(struct smb_snapshot_array);
1906
1907 /*
1908 * We return struct SRV_SNAPSHOT_ARRAY, followed by
1909 * the snapshot array (of 50 byte GMT tokens) each
1910 * representing an available previous version of the data
1911 */
1912 if (ret_data_len > (snapshot_in.snapshot_array_size +
1913 sizeof(struct smb_snapshot_array)))
1914 ret_data_len = snapshot_in.snapshot_array_size +
1915 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05001916
1917 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
1918 rc = -EFAULT;
1919 }
1920
1921 kfree(retbuf);
1922 return rc;
1923}
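/*
 * Sketch of the expected two-pass use from user space (assumes the
 * CIFS_ENUMERATE_SNAPSHOTS ioctl from cifs_ioctl.h; illustrative only):
 * the first call passes a zeroed struct smb_snapshot_array and only learns
 * how many snapshots exist and how large the token array is; the caller then
 * re-issues the ioctl with snapshot_array_size grown to fit and receives the
 * 50 byte "@GMT-..." tokens, usable e.g. with the snapshot= mount option.
 */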
1924
1925static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001926smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1927 const char *path, struct cifs_sb_info *cifs_sb,
1928 struct cifs_fid *fid, __u16 search_flags,
1929 struct cifs_search_info *srch_inf)
1930{
1931 __le16 *utf16_path;
1932 int rc;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001933 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001934 struct cifs_open_parms oparms;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001935
1936 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1937 if (!utf16_path)
1938 return -ENOMEM;
1939
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001940 oparms.tcon = tcon;
1941 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
1942 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001943 if (backup_cred(cifs_sb))
1944 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1945 else
1946 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001947 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001948 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001949
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10001950 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001951 kfree(utf16_path);
1952 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001953 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001954 return rc;
1955 }
1956
1957 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02001958 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001959
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001960 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
1961 fid->volatile_fid, 0, srch_inf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001962 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001963 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001964 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001965 }
1966 return rc;
1967}
1968
1969static int
1970smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
1971 struct cifs_fid *fid, __u16 search_flags,
1972 struct cifs_search_info *srch_inf)
1973{
1974 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
1975 fid->volatile_fid, 0, srch_inf);
1976}
1977
1978static int
1979smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
1980 struct cifs_fid *fid)
1981{
1982 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1983}
1984
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001985/*
Christoph Probsta205d502019-05-08 21:36:25 +02001986 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
1987 * the number of credits and return true. Otherwise - return false.
1988 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001989static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08001990smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001991{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001992 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001993
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001994 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001995 return false;
1996
Pavel Shilovsky66265f12019-01-23 17:11:16 -08001997 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001998 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001999 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002000 spin_unlock(&server->req_lock);
2001 wake_up(&server->request_q);
2002 }
2003
2004 return true;
2005}
2006
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002007static bool
2008smb2_is_session_expired(char *buf)
2009{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002010 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002011
Mark Symsd81243c2018-05-24 09:47:31 +01002012 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2013 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002014 return false;
2015
Steve Frenche68a9322018-07-30 14:23:58 -05002016 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2017 le16_to_cpu(shdr->Command),
2018 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002019 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002020
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002021 return true;
2022}
2023
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002024static int
2025smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2026 struct cifsInodeInfo *cinode)
2027{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002028 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2029 return SMB2_lease_break(0, tcon, cinode->lease_key,
2030 smb2_get_lease_state(cinode));
2031
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002032 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2033 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002034 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002035}
2036
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002037void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002038smb2_set_related(struct smb_rqst *rqst)
2039{
2040 struct smb2_sync_hdr *shdr;
2041
2042 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
2043 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2044}
2045
2046char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2047
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002048void
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002049smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002050{
2051 struct smb2_sync_hdr *shdr;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002052 struct cifs_ses *ses = tcon->ses;
2053 struct TCP_Server_Info *server = ses->server;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002054 unsigned long len = smb_rqst_len(server, rqst);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002055 int i, num_padding;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002056
2057 /* SMB headers in a compound are 8 byte aligned. */
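	/*
	 * Worked example (illustrative): if the request built so far is 196
	 * bytes, len & 7 == 4, so num_padding below is 4 and NextCommand is
	 * set to 200, the 8-byte-aligned offset of the next sync header.
	 */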
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002058
2059 /* No padding needed */
2060 if (!(len & 7))
2061 goto finished;
2062
2063 num_padding = 8 - (len & 7);
2064 if (!smb3_encryption_required(tcon)) {
2065 /*
2066 * If we do not have encryption then we can just add an extra
2067 * iov for the padding.
2068 */
2069 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2070 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2071 rqst->rq_nvec++;
2072 len += num_padding;
2073 } else {
2074 /*
2075 * We can not add a small padding iov for the encryption case
2076 * because the encryption framework can not handle the padding
2077 * iovs.
2078 * We have to flatten this into a single buffer and add
2079 * the padding to it.
2080 */
2081 for (i = 1; i < rqst->rq_nvec; i++) {
2082 memcpy(rqst->rq_iov[0].iov_base +
2083 rqst->rq_iov[0].iov_len,
2084 rqst->rq_iov[i].iov_base,
2085 rqst->rq_iov[i].iov_len);
2086 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06002087 }
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002088 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2089 0, num_padding);
2090 rqst->rq_iov[0].iov_len += num_padding;
2091 len += num_padding;
2092 rqst->rq_nvec = 1;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002093 }
2094
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002095 finished:
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002096 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
2097 shdr->NextCommand = cpu_to_le32(len);
2098}
2099
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002100/*
2101 * Passes the query info response back to the caller on success.
2102 * Caller needs to free this with free_rsp_buf().
2103 */
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002104int
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002105smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2106 __le16 *utf16_path, u32 desired_access,
2107 u32 class, u32 type, u32 output_len,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002108 struct kvec *rsp, int *buftype,
2109 struct cifs_sb_info *cifs_sb)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002110{
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002111 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002112 int flags = 0;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002113 struct smb_rqst rqst[3];
2114 int resp_buftype[3];
2115 struct kvec rsp_iov[3];
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002116 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002117 struct kvec qi_iov[1];
2118 struct kvec close_iov[1];
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002119 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002120 struct cifs_open_parms oparms;
2121 struct cifs_fid fid;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002122 int rc;
2123
2124 if (smb3_encryption_required(tcon))
2125 flags |= CIFS_TRANSFORM_REQ;
2126
2127 memset(rqst, 0, sizeof(rqst));
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002128 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002129 memset(rsp_iov, 0, sizeof(rsp_iov));
2130
2131 memset(&open_iov, 0, sizeof(open_iov));
2132 rqst[0].rq_iov = open_iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002133 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002134
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002135 oparms.tcon = tcon;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002136 oparms.desired_access = desired_access;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002137 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002138 if (cifs_sb && backup_cred(cifs_sb))
2139 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2140 else
2141 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002142 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002143 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002144
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002145 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002146 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002147 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002148 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002149
2150 memset(&qi_iov, 0, sizeof(qi_iov));
2151 rqst[1].rq_iov = qi_iov;
2152 rqst[1].rq_nvec = 1;
2153
2154 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002155 class, type, 0,
2156 output_len, 0,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05002157 NULL);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002158 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002159 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002160 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002161 smb2_set_related(&rqst[1]);
2162
2163 memset(&close_iov, 0, sizeof(close_iov));
2164 rqst[2].rq_iov = close_iov;
2165 rqst[2].rq_nvec = 1;
2166
2167 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2168 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002169 goto qic_exit;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002170 smb2_set_related(&rqst[2]);
2171
2172 rc = compound_send_recv(xid, ses, flags, 3, rqst,
2173 resp_buftype, rsp_iov);
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002174 if (rc) {
2175 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002176 goto qic_exit;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002177 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002178 *rsp = rsp_iov[1];
2179 *buftype = resp_buftype[1];
2180
2181 qic_exit:
2182 SMB2_open_free(&rqst[0]);
2183 SMB2_query_info_free(&rqst[1]);
2184 SMB2_close_free(&rqst[2]);
2185 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2186 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2187 return rc;
2188}
2189
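/*
 * smb2_queryfs() below shows the intended use of the helper above: it passes
 * a null path (root of the share), FS_FULL_SIZE_INFORMATION and
 * SMB2_O_INFO_FILESYSTEM, then validates and copies the returned buffer.
 */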
2190static int
2191smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2192 struct kstatfs *buf)
2193{
2194 struct smb2_query_info_rsp *rsp;
2195 struct smb2_fs_full_size_info *info = NULL;
2196 __le16 utf16_path = 0; /* Null - open root of share */
2197 struct kvec rsp_iov = {NULL, 0};
2198 int buftype = CIFS_NO_BUFFER;
2199 int rc;
2200
2201
2202 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2203 FILE_READ_ATTRIBUTES,
2204 FS_FULL_SIZE_INFORMATION,
2205 SMB2_O_INFO_FILESYSTEM,
2206 sizeof(struct smb2_fs_full_size_info),
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002207 &rsp_iov, &buftype, NULL);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002208 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002209 goto qfs_exit;
2210
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002211 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002212 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002213 info = (struct smb2_fs_full_size_info *)(
2214 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2215 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2216 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002217 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002218 sizeof(struct smb2_fs_full_size_info));
2219 if (!rc)
2220 smb2_copy_fs_info_to_kstatfs(info, buf);
2221
2222qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002223 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002224 return rc;
2225}
2226
Steve French2d304212018-06-24 23:28:12 -05002227static int
2228smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2229 struct kstatfs *buf)
2230{
2231 int rc;
2232 __le16 srch_path = 0; /* Null - open root of share */
2233 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2234 struct cifs_open_parms oparms;
2235 struct cifs_fid fid;
2236
2237 if (!tcon->posix_extensions)
2238 return smb2_queryfs(xid, tcon, buf);
2239
2240 oparms.tcon = tcon;
2241 oparms.desired_access = FILE_READ_ATTRIBUTES;
2242 oparms.disposition = FILE_OPEN;
2243 oparms.create_options = 0;
2244 oparms.fid = &fid;
2245 oparms.reconnect = false;
2246
2247 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2248 if (rc)
2249 return rc;
2250
2251 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2252 fid.volatile_fid, buf);
2253 buf->f_type = SMB2_MAGIC_NUMBER;
2254 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2255 return rc;
2256}
Steve French2d304212018-06-24 23:28:12 -05002257
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002258static bool
2259smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2260{
2261 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2262 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2263}
2264
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002265static int
2266smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2267 __u64 length, __u32 type, int lock, int unlock, bool wait)
2268{
2269 if (unlock && !lock)
2270 type = SMB2_LOCKFLAG_UNLOCK;
2271 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2272 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2273 current->tgid, length, offset, type, wait);
2274}
2275
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002276static void
2277smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2278{
2279 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2280}
2281
2282static void
2283smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2284{
2285 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2286}
2287
2288static void
2289smb2_new_lease_key(struct cifs_fid *fid)
2290{
Steve Frenchfa70b872016-09-22 00:39:34 -05002291 generate_random_uuid(fid->lease_key);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002292}
2293
Aurelien Aptel9d496402017-02-13 16:16:49 +01002294static int
2295smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2296 const char *search_name,
2297 struct dfs_info3_param **target_nodes,
2298 unsigned int *num_of_nodes,
2299 const struct nls_table *nls_codepage, int remap)
2300{
2301 int rc;
2302 __le16 *utf16_path = NULL;
2303 int utf16_path_len = 0;
2304 struct cifs_tcon *tcon;
2305 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2306 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2307 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2308
Christoph Probsta205d502019-05-08 21:36:25 +02002309 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002310
2311 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002312 * Try to use the IPC tcon, otherwise just use any
Aurelien Aptel9d496402017-02-13 16:16:49 +01002313 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002314 tcon = ses->tcon_ipc;
2315 if (tcon == NULL) {
2316 spin_lock(&cifs_tcp_ses_lock);
2317 tcon = list_first_entry_or_null(&ses->tcon_list,
2318 struct cifs_tcon,
2319 tcon_list);
2320 if (tcon)
2321 tcon->tc_count++;
2322 spin_unlock(&cifs_tcp_ses_lock);
2323 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002324
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002325 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002326 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2327 ses);
2328 rc = -ENOTCONN;
2329 goto out;
2330 }
2331
2332 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2333 &utf16_path_len,
2334 nls_codepage, remap);
2335 if (!utf16_path) {
2336 rc = -ENOMEM;
2337 goto out;
2338 }
2339
2340 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2341 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2342 if (!dfs_req) {
2343 rc = -ENOMEM;
2344 goto out;
2345 }
2346
2347 /* Highest DFS referral version understood */
2348 dfs_req->MaxReferralLevel = DFS_VERSION;
2349
2350 /* Path to resolve in a UTF-16 null-terminated string */
2351 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2352
2353 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002354 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2355 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002356 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002357 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002358 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002359 } while (rc == -EAGAIN);
2360
2361 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002362 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Christoph Probsta205d502019-05-08 21:36:25 +02002363 cifs_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002364 goto out;
2365 }
2366
2367 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2368 num_of_nodes, target_nodes,
2369 nls_codepage, remap, search_name,
2370 true /* is_unicode */);
2371 if (rc) {
Christoph Probsta205d502019-05-08 21:36:25 +02002372 cifs_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002373 goto out;
2374 }
2375
2376 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002377 if (tcon && !tcon->ipc) {
2378 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002379 spin_lock(&cifs_tcp_ses_lock);
2380 tcon->tc_count--;
2381 spin_unlock(&cifs_tcp_ses_lock);
2382 }
2383 kfree(utf16_path);
2384 kfree(dfs_req);
2385 kfree(dfs_rsp);
2386 return rc;
2387}
Pavel Shilovsky78932422016-07-24 10:37:38 +03002388#define SMB2_SYMLINK_STRUCT_SIZE \
2389 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
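/*
 * Layout parsed below (a sketch of the MS-SMB2 symbolic link error
 * response): the error response embeds a symlink error context whose
 * PathBuffer holds both names, each located by an offset/length pair
 * relative to the start of PathBuffer:
 *
 *	smb2_err_rsp
 *	  ErrorData -> smb2_symlink_err_rsp
 *	                 SubstituteNameOffset/Length ---+
 *	                 PrintNameOffset/Length --------+--> into PathBuffer[]
 */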
2390
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002391static int
2392smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002393 struct cifs_sb_info *cifs_sb, const char *full_path,
2394 char **target_path, bool is_reparse_point)
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002395{
2396 int rc;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002397 __le16 *utf16_path = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002398 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2399 struct cifs_open_parms oparms;
2400 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002401 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002402 struct smb2_err_rsp *err_buf = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002403 struct smb2_symlink_err_rsp *symlink;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002404 unsigned int sub_len;
2405 unsigned int sub_offset;
2406 unsigned int print_len;
2407 unsigned int print_offset;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002408 int flags = 0;
2409 struct smb_rqst rqst[3];
2410 int resp_buftype[3];
2411 struct kvec rsp_iov[3];
2412 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2413 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
2414 struct kvec close_iov[1];
2415 struct smb2_create_rsp *create_rsp;
2416 struct smb2_ioctl_rsp *ioctl_rsp;
2417 char *ioctl_buf;
2418 u32 plen;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002419
2420 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2421
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002422 if (smb3_encryption_required(tcon))
2423 flags |= CIFS_TRANSFORM_REQ;
2424
2425 memset(rqst, 0, sizeof(rqst));
2426 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2427 memset(rsp_iov, 0, sizeof(rsp_iov));
2428
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002429 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2430 if (!utf16_path)
2431 return -ENOMEM;
2432
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002433 /* Open */
2434 memset(&open_iov, 0, sizeof(open_iov));
2435 rqst[0].rq_iov = open_iov;
2436 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2437
2438 memset(&oparms, 0, sizeof(oparms));
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002439 oparms.tcon = tcon;
2440 oparms.desired_access = FILE_READ_ATTRIBUTES;
2441 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002442
Steve French5e196972018-08-27 17:04:13 -05002443 if (backup_cred(cifs_sb))
2444 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2445 else
2446 oparms.create_options = 0;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002447 if (is_reparse_point)
2448 oparms.create_options |= OPEN_REPARSE_POINT;
2449
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002450 oparms.fid = &fid;
2451 oparms.reconnect = false;
2452
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002453 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
2454 if (rc)
2455 goto querty_exit;
2456 smb2_set_next_command(tcon, &rqst[0]);
2457
2458
2459 /* IOCTL */
2460 memset(&io_iov, 0, sizeof(io_iov));
2461 rqst[1].rq_iov = io_iov;
2462 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
2463
2464 rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
2465 fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
2466 true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
2467 if (rc)
2468 goto querty_exit;
2469
2470 smb2_set_next_command(tcon, &rqst[1]);
2471 smb2_set_related(&rqst[1]);
2472
2473
2474 /* Close */
2475 memset(&close_iov, 0, sizeof(close_iov));
2476 rqst[2].rq_iov = close_iov;
2477 rqst[2].rq_nvec = 1;
2478
2479 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2480 if (rc)
2481 goto querty_exit;
2482
2483 smb2_set_related(&rqst[2]);
2484
2485 rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
2486 resp_buftype, rsp_iov);
2487
2488 create_rsp = rsp_iov[0].iov_base;
2489 if (create_rsp && create_rsp->sync_hdr.Status)
2490 err_iov = rsp_iov[0];
2491 ioctl_rsp = rsp_iov[1].iov_base;
2492
2493 /*
2494 * Open was successful and we got an ioctl response.
2495 */
2496 if ((rc == 0) && (is_reparse_point)) {
2497 /* See MS-FSCC 2.3.23 */
2498
2499 ioctl_buf = (char *)ioctl_rsp + le32_to_cpu(ioctl_rsp->OutputOffset);
2500 plen = le32_to_cpu(ioctl_rsp->OutputCount);
2501
2502 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
2503 rsp_iov[1].iov_len) {
2504 cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", plen);
2505 rc = -EIO;
2506 goto querty_exit;
2507 }
2508
2509 /* Reparse point data is in ioctl_buf/plen but is not parsed further here yet */
2510 goto querty_exit;
2511 }
2512
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002513 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002514 rc = -ENOENT;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002515 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002516 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002517
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002518 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002519 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002520 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002521 rc = -ENOENT;
2522 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002523 }
2524
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002525 /* open must fail on symlink - reset rc */
2526 rc = 0;
2527 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2528 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2529 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002530 print_len = le16_to_cpu(symlink->PrintNameLength);
2531 print_offset = le16_to_cpu(symlink->PrintNameOffset);
2532
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002533 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002534 rc = -ENOENT;
2535 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002536 }
2537
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002538 if (err_iov.iov_len <
2539 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002540 rc = -ENOENT;
2541 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002542 }
2543
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002544 *target_path = cifs_strndup_from_utf16(
2545 (char *)symlink->PathBuffer + sub_offset,
2546 sub_len, true, cifs_sb->local_nls);
2547 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002548 rc = -ENOMEM;
2549 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002550 }
2551 convert_delimiter(*target_path, '/');
2552 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002553
2554 querty_exit:
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002555 cifs_dbg(FYI, "query symlink rc %d\n", rc);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002556 kfree(utf16_path);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002557 SMB2_open_free(&rqst[0]);
2558 SMB2_ioctl_free(&rqst[1]);
2559 SMB2_close_free(&rqst[2]);
2560 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2561 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2562 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002563 return rc;
2564}
2565
Arnd Bergmann84908422017-06-27 17:06:13 +02002566#ifdef CONFIG_CIFS_ACL
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002567static struct cifs_ntsd *
2568get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2569 const struct cifs_fid *cifsfid, u32 *pacllen)
2570{
2571 struct cifs_ntsd *pntsd = NULL;
2572 unsigned int xid;
2573 int rc = -EOPNOTSUPP;
2574 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2575
2576 if (IS_ERR(tlink))
2577 return ERR_CAST(tlink);
2578
2579 xid = get_xid();
2580 cifs_dbg(FYI, "trying to get acl\n");
2581
2582 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2583 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2584 free_xid(xid);
2585
2586 cifs_put_tlink(tlink);
2587
2588 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2589 if (rc)
2590 return ERR_PTR(rc);
2591 return pntsd;
2592
2593}
2594
2595static struct cifs_ntsd *
2596get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2597 const char *path, u32 *pacllen)
2598{
2599 struct cifs_ntsd *pntsd = NULL;
2600 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2601 unsigned int xid;
2602 int rc;
2603 struct cifs_tcon *tcon;
2604 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2605 struct cifs_fid fid;
2606 struct cifs_open_parms oparms;
2607 __le16 *utf16_path;
2608
2609 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2610 if (IS_ERR(tlink))
2611 return ERR_CAST(tlink);
2612
2613 tcon = tlink_tcon(tlink);
2614 xid = get_xid();
2615
2616 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002617 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002618 else
2619 oparms.create_options = 0;
2620
2621 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002622 if (!utf16_path) {
2623 rc = -ENOMEM;
2624 free_xid(xid);
2625 return ERR_PTR(rc);
2626 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002627
2628 oparms.tcon = tcon;
2629 oparms.desired_access = READ_CONTROL;
2630 oparms.disposition = FILE_OPEN;
2631 oparms.fid = &fid;
2632 oparms.reconnect = false;
2633
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002634 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002635 kfree(utf16_path);
2636 if (!rc) {
2637 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2638 fid.volatile_fid, (void **)&pntsd, pacllen);
2639 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2640 }
2641
2642 cifs_put_tlink(tlink);
2643 free_xid(xid);
2644
2645 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2646 if (rc)
2647 return ERR_PTR(rc);
2648 return pntsd;
2649}
2650
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002651#ifdef CONFIG_CIFS_ACL
2652static int
2653set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2654 struct inode *inode, const char *path, int aclflag)
2655{
2656 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2657 unsigned int xid;
2658 int rc, access_flags = 0;
2659 struct cifs_tcon *tcon;
2660 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2661 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2662 struct cifs_fid fid;
2663 struct cifs_open_parms oparms;
2664 __le16 *utf16_path;
2665
2666 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2667 if (IS_ERR(tlink))
2668 return PTR_ERR(tlink);
2669
2670 tcon = tlink_tcon(tlink);
2671 xid = get_xid();
2672
2673 if (backup_cred(cifs_sb))
2674 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2675 else
2676 oparms.create_options = 0;
2677
2678 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2679 access_flags = WRITE_OWNER;
2680 else
2681 access_flags = WRITE_DAC;
2682
2683 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002684 if (!utf16_path) {
2685 rc = -ENOMEM;
2686 free_xid(xid);
2687 return rc;
2688 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002689
2690 oparms.tcon = tcon;
2691 oparms.desired_access = access_flags;
2692 oparms.disposition = FILE_OPEN;
2693 oparms.path = path;
2694 oparms.fid = &fid;
2695 oparms.reconnect = false;
2696
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002697 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002698 kfree(utf16_path);
2699 if (!rc) {
2700 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2701 fid.volatile_fid, pnntsd, acllen, aclflag);
2702 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2703 }
2704
2705 cifs_put_tlink(tlink);
2706 free_xid(xid);
2707 return rc;
2708}
2709#endif /* CIFS_ACL */
2710
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002711/* Retrieve an ACL from the server */
2712static struct cifs_ntsd *
2713get_smb2_acl(struct cifs_sb_info *cifs_sb,
2714 struct inode *inode, const char *path,
2715 u32 *pacllen)
2716{
2717 struct cifs_ntsd *pntsd = NULL;
2718 struct cifsFileInfo *open_file = NULL;
2719
2720 if (inode)
2721 open_file = find_readable_file(CIFS_I(inode), true);
2722 if (!open_file)
2723 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2724
2725 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2726 cifsFileInfo_put(open_file);
2727 return pntsd;
2728}
Arnd Bergmann84908422017-06-27 17:06:13 +02002729#endif
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002730
Steve French30175622014-08-17 18:16:40 -05002731static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2732 loff_t offset, loff_t len, bool keep_size)
2733{
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002734 struct cifs_ses *ses = tcon->ses;
Steve French30175622014-08-17 18:16:40 -05002735 struct inode *inode;
2736 struct cifsInodeInfo *cifsi;
2737 struct cifsFileInfo *cfile = file->private_data;
2738 struct file_zero_data_information fsctl_buf;
2739 long rc;
2740 unsigned int xid;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002741 __le64 eof;
Steve French30175622014-08-17 18:16:40 -05002742
2743 xid = get_xid();
2744
David Howells2b0143b2015-03-17 22:25:59 +00002745 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05002746 cifsi = CIFS_I(inode);
2747
Christoph Probsta205d502019-05-08 21:36:25 +02002748 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
Steve French779ede02019-03-13 01:41:49 -05002749 ses->Suid, offset, len);
2750
2751
Steve French30175622014-08-17 18:16:40 -05002752	/* if the file is not oplocked, we can't be sure whether we are asked to extend its size */
2753 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002754 if (keep_size == false) {
2755 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002756 trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
2757 tcon->tid, ses->Suid, offset, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002758 free_xid(xid);
2759 return rc;
2760 }
Steve French30175622014-08-17 18:16:40 -05002761
Steve Frenchd1c35af2019-05-09 00:09:37 -05002762 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French30175622014-08-17 18:16:40 -05002763
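	/*
	 * FSCTL_SET_ZERO_DATA zeroes the half-open range
	 * [FileOffset, BeyondFinalZero): e.g. offset 4096, len 8192 sends
	 * FileOffset = 4096 and BeyondFinalZero = 12288, so byte 12288
	 * itself is left untouched.
	 */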
2764 fsctl_buf.FileOffset = cpu_to_le64(offset);
2765 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2766
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002767 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2768 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
2769 (char *)&fsctl_buf,
2770 sizeof(struct file_zero_data_information),
2771 0, NULL, NULL);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002772 if (rc)
2773 goto zero_range_exit;
2774
2775 /*
2776 * do we also need to change the size of the file?
2777 */
2778 if (keep_size == false && i_size_read(inode) < offset + len) {
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002779 eof = cpu_to_le64(offset + len);
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002780 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2781 cfile->fid.volatile_fid, cfile->pid, &eof);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002782 }
2783
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002784 zero_range_exit:
Steve French30175622014-08-17 18:16:40 -05002785 free_xid(xid);
Steve French779ede02019-03-13 01:41:49 -05002786 if (rc)
2787 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
2788 ses->Suid, offset, len, rc);
2789 else
2790 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
2791 ses->Suid, offset, len);
Steve French30175622014-08-17 18:16:40 -05002792 return rc;
2793}
2794
Steve French31742c52014-08-17 08:38:47 -05002795static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2796 loff_t offset, loff_t len)
2797{
2798 struct inode *inode;
2799 struct cifsInodeInfo *cifsi;
2800 struct cifsFileInfo *cfile = file->private_data;
2801 struct file_zero_data_information fsctl_buf;
2802 long rc;
2803 unsigned int xid;
2804 __u8 set_sparse = 1;
2805
2806 xid = get_xid();
2807
David Howells2b0143b2015-03-17 22:25:59 +00002808 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002809 cifsi = CIFS_I(inode);
2810
2811 /* Need to make file sparse, if not already, before freeing range. */
2812 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002813 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2814 rc = -EOPNOTSUPP;
2815 free_xid(xid);
2816 return rc;
2817 }
Steve French31742c52014-08-17 08:38:47 -05002818
Christoph Probsta205d502019-05-08 21:36:25 +02002819 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05002820
2821 fsctl_buf.FileOffset = cpu_to_le64(offset);
2822 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2823
2824 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2825 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002826 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05002827 sizeof(struct file_zero_data_information),
2828 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05002829 free_xid(xid);
2830 return rc;
2831}
2832
Steve French9ccf3212014-10-18 17:01:15 -05002833static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
2834 loff_t off, loff_t len, bool keep_size)
2835{
2836 struct inode *inode;
2837 struct cifsInodeInfo *cifsi;
2838 struct cifsFileInfo *cfile = file->private_data;
2839 long rc = -EOPNOTSUPP;
2840 unsigned int xid;
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10002841 __le64 eof;
Steve French9ccf3212014-10-18 17:01:15 -05002842
2843 xid = get_xid();
2844
David Howells2b0143b2015-03-17 22:25:59 +00002845 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05002846 cifsi = CIFS_I(inode);
2847
Steve French779ede02019-03-13 01:41:49 -05002848 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
2849 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05002850	/* if the file is not oplocked, we can't be sure whether we are asked to extend its size */
2851 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002852 if (keep_size == false) {
Steve French779ede02019-03-13 01:41:49 -05002853 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
2854 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002855 free_xid(xid);
2856 return rc;
2857 }
Steve French9ccf3212014-10-18 17:01:15 -05002858
2859 /*
 2860	 * Files are non-sparse by default, so falloc may be a no-op.
 2861	 * Must check if the file is sparse. If it is not sparse and we are not
 2862	 * extending it, nothing needs to be done since the file is already allocated.
2863 */
2864 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
2865 if (keep_size == true)
Steve Frenchcfe89092018-05-19 02:04:55 -05002866 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002867 /* check if extending file */
2868 else if (i_size_read(inode) >= off + len)
2869 /* not extending file and already not sparse */
Steve Frenchcfe89092018-05-19 02:04:55 -05002870 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002871 /* BB: in future add else clause to extend file */
2872 else
Steve Frenchcfe89092018-05-19 02:04:55 -05002873 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002874 if (rc)
2875 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
2876 tcon->tid, tcon->ses->Suid, off, len, rc);
2877 else
2878 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
2879 tcon->tid, tcon->ses->Suid, off, len);
Steve Frenchcfe89092018-05-19 02:04:55 -05002880 free_xid(xid);
2881 return rc;
Steve French9ccf3212014-10-18 17:01:15 -05002882 }
2883
2884 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
2885 /*
 2886	 * Check if the falloc range starts within the first few pages of
 2887	 * the file and ends within a few pages of the end of the file, to
 2888	 * ensure that most of the file is being fallocated now. If so,
 2889	 * then marking the whole file non-sparse (i.e. potentially making
 2890	 * a few extra pages at the beginning or end of the file non-sparse
 2891	 * as well) via set_sparse is harmless.
2892 */
Steve Frenchcfe89092018-05-19 02:04:55 -05002893 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
2894 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002895 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
2896 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002897 free_xid(xid);
2898 return rc;
2899 }
Steve French9ccf3212014-10-18 17:01:15 -05002900
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10002901 smb2_set_sparse(xid, tcon, cfile, inode, false);
2902 rc = 0;
2903 } else {
2904 smb2_set_sparse(xid, tcon, cfile, inode, false);
2905 rc = 0;
2906 if (i_size_read(inode) < off + len) {
2907 eof = cpu_to_le64(off + len);
2908 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2909 cfile->fid.volatile_fid, cfile->pid,
2910 &eof);
2911 }
Steve French9ccf3212014-10-18 17:01:15 -05002912 }
Steve French9ccf3212014-10-18 17:01:15 -05002913
Steve French779ede02019-03-13 01:41:49 -05002914 if (rc)
2915 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
2916 tcon->ses->Suid, off, len, rc);
2917 else
2918 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
2919 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05002920
2921 free_xid(xid);
2922 return rc;
2923}
2924
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10002925static int smb3_fiemap(struct cifs_tcon *tcon,
2926 struct cifsFileInfo *cfile,
2927 struct fiemap_extent_info *fei, u64 start, u64 len)
2928{
2929 unsigned int xid;
2930 struct file_allocated_range_buffer in_data, *out_data;
2931 u32 out_data_len;
2932 int i, num, rc, flags, last_blob;
2933 u64 next;
2934
2935 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
2936 return -EBADR;
2937
2938 xid = get_xid();
2939 again:
2940 in_data.file_offset = cpu_to_le64(start);
2941 in_data.length = cpu_to_le64(len);
2942
2943 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2944 cfile->fid.volatile_fid,
2945 FSCTL_QUERY_ALLOCATED_RANGES, true,
2946 (char *)&in_data, sizeof(in_data),
2947 1024 * sizeof(struct file_allocated_range_buffer),
2948 (char **)&out_data, &out_data_len);
2949 if (rc == -E2BIG) {
2950 last_blob = 0;
2951 rc = 0;
2952 } else
2953 last_blob = 1;
2954 if (rc)
2955 goto out;
2956
2957 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
2958 rc = -EINVAL;
2959 goto out;
2960 }
2961 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
2962 rc = -EINVAL;
2963 goto out;
2964 }
2965
2966 num = out_data_len / sizeof(struct file_allocated_range_buffer);
2967 for (i = 0; i < num; i++) {
2968 flags = 0;
2969 if (i == num - 1 && last_blob)
2970 flags |= FIEMAP_EXTENT_LAST;
2971
2972 rc = fiemap_fill_next_extent(fei,
2973 le64_to_cpu(out_data[i].file_offset),
2974 le64_to_cpu(out_data[i].file_offset),
2975 le64_to_cpu(out_data[i].length),
2976 flags);
2977 if (rc < 0)
2978 goto out;
2979 if (rc == 1) {
2980 rc = 0;
2981 goto out;
2982 }
2983 }
2984
2985 if (!last_blob) {
2986 next = le64_to_cpu(out_data[num - 1].file_offset) +
2987 le64_to_cpu(out_data[num - 1].length);
2988 len = len - (next - start);
2989 start = next;
2990 goto again;
2991 }
2992
2993 out:
2994 free_xid(xid);
2995 kfree(out_data);
2996 return rc;
2997}
Steve French9ccf3212014-10-18 17:01:15 -05002998
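/*
 * Illustrative, standalone user-space sketch (not part of this translation
 * unit): smb3_fiemap() above is driven by the generic FS_IOC_FIEMAP ioctl,
 * and each allocated range the server reports becomes one fiemap extent.
 * The path and extent count below are assumptions for the example only.
 */
#if 0	/* example only, never built as part of this file */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	struct fiemap *fm;
	unsigned int i;
	int fd = open("/mnt/share/sparse.bin", O_RDONLY);

	if (fd < 0)
		return 1;
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 32;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("extent %u: logical %llu len %llu flags 0x%x\n", i,
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);
	free(fm);
	close(fd);
	return 0;
}
#endif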
Steve French31742c52014-08-17 08:38:47 -05002999static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3000 loff_t off, loff_t len)
3001{
3002 /* KEEP_SIZE already checked for by do_fallocate */
3003 if (mode & FALLOC_FL_PUNCH_HOLE)
3004 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003005 else if (mode & FALLOC_FL_ZERO_RANGE) {
3006 if (mode & FALLOC_FL_KEEP_SIZE)
3007 return smb3_zero_range(file, tcon, off, len, true);
3008 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003009 } else if (mode == FALLOC_FL_KEEP_SIZE)
3010 return smb3_simple_falloc(file, tcon, off, len, true);
3011 else if (mode == 0)
3012 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003013
3014 return -EOPNOTSUPP;
3015}
3016
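/*
 * Illustrative, standalone user-space sketch (not part of this translation
 * unit) showing which helper above each fallocate(2) mode reaches on an SMB3
 * mount.  The path and sizes are assumptions for the example only.
 */
#if 0	/* example only, never built as part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("/mnt/share/file.bin", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* mode == 0 -> smb3_simple_falloc(..., keep_size = false) */
	if (fallocate(fd, 0, 0, 1 << 20))
		perror("allocate");
	/* FALLOC_FL_KEEP_SIZE -> smb3_simple_falloc(..., keep_size = true) */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
		perror("allocate, keep size");
	/* PUNCH_HOLE (the VFS requires KEEP_SIZE) -> smb3_punch_hole() */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192))
		perror("punch hole");
	/* ZERO_RANGE -> smb3_zero_range(), keep_size set by FALLOC_FL_KEEP_SIZE */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 4096, 8192))
		perror("zero range");
	close(fd);
	return 0;
}
#endif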
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003017static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003018smb2_downgrade_oplock(struct TCP_Server_Info *server,
3019 struct cifsInodeInfo *cinode, bool set_level2)
3020{
3021 if (set_level2)
3022 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
3023 0, NULL);
3024 else
3025 server->ops->set_oplock_level(cinode, 0, 0, NULL);
3026}
3027
3028static void
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003029smb21_downgrade_oplock(struct TCP_Server_Info *server,
3030 struct cifsInodeInfo *cinode, bool set_level2)
3031{
3032 server->ops->set_oplock_level(cinode,
3033 set_level2 ? SMB2_LEASE_READ_CACHING_HE :
3034 0, 0, NULL);
3035}
3036
3037static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003038smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3039 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003040{
3041 oplock &= 0xFF;
3042 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3043 return;
3044 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003045 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003046 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3047 &cinode->vfs_inode);
3048 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003049 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003050 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3051 &cinode->vfs_inode);
3052 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3053 cinode->oplock = CIFS_CACHE_READ_FLG;
3054 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3055 &cinode->vfs_inode);
3056 } else
3057 cinode->oplock = 0;
3058}
3059
3060static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003061smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3062 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003063{
3064 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003065 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003066
3067 oplock &= 0xFF;
3068 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3069 return;
3070
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003071 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003072 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003073 strcat(message, "R");
3074 }
3075 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003076 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003077 strcat(message, "H");
3078 }
3079 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003080 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003081 strcat(message, "W");
3082 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003083 if (!new_oplock)
3084 strncpy(message, "None", sizeof(message));
3085
3086 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003087 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3088 &cinode->vfs_inode);
3089}
3090
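/*
 * SMB3 leases carry an epoch that the server increments whenever the lease
 * state changes.  Roughly: if the epoch advanced further than we can account
 * for from our own state change while we only held read (or read+handle)
 * caching, some other client changed the file behind our back, so the helper
 * below asks the caller to invalidate the page cache via *purge_cache.
 */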
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003091static void
3092smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3093 unsigned int epoch, bool *purge_cache)
3094{
3095 unsigned int old_oplock = cinode->oplock;
3096
3097 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
3098
3099 if (purge_cache) {
3100 *purge_cache = false;
3101 if (old_oplock == CIFS_CACHE_READ_FLG) {
3102 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
3103 (epoch - cinode->epoch > 0))
3104 *purge_cache = true;
3105 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3106 (epoch - cinode->epoch > 1))
3107 *purge_cache = true;
3108 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3109 (epoch - cinode->epoch > 1))
3110 *purge_cache = true;
3111 else if (cinode->oplock == 0 &&
3112 (epoch - cinode->epoch > 0))
3113 *purge_cache = true;
3114 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
3115 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3116 (epoch - cinode->epoch > 0))
3117 *purge_cache = true;
3118 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3119 (epoch - cinode->epoch > 1))
3120 *purge_cache = true;
3121 }
3122 cinode->epoch = epoch;
3123 }
3124}
3125
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003126static bool
3127smb2_is_read_op(__u32 oplock)
3128{
3129 return oplock == SMB2_OPLOCK_LEVEL_II;
3130}
3131
3132static bool
3133smb21_is_read_op(__u32 oplock)
3134{
3135 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3136 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3137}
3138
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003139static __le32
3140map_oplock_to_lease(u8 oplock)
3141{
3142 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3143 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3144 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3145 return SMB2_LEASE_READ_CACHING;
3146 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3147 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3148 SMB2_LEASE_WRITE_CACHING;
3149 return 0;
3150}
3151
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003152static char *
3153smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3154{
3155 struct create_lease *buf;
3156
3157 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3158 if (!buf)
3159 return NULL;
3160
Stefano Brivio729c0c92018-07-05 15:10:02 +02003161 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003162 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003163
3164 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3165 (struct create_lease, lcontext));
3166 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3167 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3168 (struct create_lease, Name));
3169 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003170 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003171 buf->Name[0] = 'R';
3172 buf->Name[1] = 'q';
3173 buf->Name[2] = 'L';
3174 buf->Name[3] = 's';
3175 return (char *)buf;
3176}
3177
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003178static char *
3179smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3180{
3181 struct create_lease_v2 *buf;
3182
3183 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3184 if (!buf)
3185 return NULL;
3186
Stefano Brivio729c0c92018-07-05 15:10:02 +02003187 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003188 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3189
3190 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3191 (struct create_lease_v2, lcontext));
3192 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3193 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3194 (struct create_lease_v2, Name));
3195 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003196 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003197 buf->Name[0] = 'R';
3198 buf->Name[1] = 'q';
3199 buf->Name[2] = 'L';
3200 buf->Name[3] = 's';
3201 return (char *)buf;
3202}
3203
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003204static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003205smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003206{
3207 struct create_lease *lc = (struct create_lease *)buf;
3208
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003209 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003210 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3211 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3212 return le32_to_cpu(lc->lcontext.LeaseState);
3213}
3214
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003215static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003216smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003217{
3218 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3219
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003220 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003221 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3222 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003223 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003224 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003225 return le32_to_cpu(lc->lcontext.LeaseState);
3226}
3227
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003228static unsigned int
3229smb2_wp_retry_size(struct inode *inode)
3230{
3231 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3232 SMB2_MAX_BUFFER_SIZE);
3233}
3234
Pavel Shilovsky52755802014-08-18 20:49:57 +04003235static bool
3236smb2_dir_needs_close(struct cifsFileInfo *cfile)
3237{
3238 return !cfile->invalidHandle;
3239}
3240
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003241static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003242fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
3243 struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003244{
3245 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003246 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003247
3248 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3249 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3250 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3251 tr_hdr->Flags = cpu_to_le16(0x01);
3252 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
3253 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003254}
3255
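/*
 * Layout note: the transform header begins with ProtocolId (4 bytes) and
 * Signature (16 bytes).  Those first 20 bytes are neither encrypted nor
 * authenticated; the remaining fields (Nonce, OriginalMessageSize, Reserved,
 * Flags, SessionId) form the associated data for the AEAD, which is why both
 * init_sg() and crypt_message() below subtract 20.
 */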
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11003256/* We cannot use the normal sg_set_buf() as we will sometimes pass a
3257 * stack object as buf.
3258 */
3259static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
3260 unsigned int buflen)
3261{
3262 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
3263}
3264
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003265/* Assumes the first rqst has a transform header as the first iov.
3266 * I.e.
3267 * rqst[0].rq_iov[0] is transform header
3268 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
3269 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003270 */
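/*
 * For example, a two-element compound where rqst[0] has 2 iovs + 1 page and
 * rqst[1] has 1 iov + 2 pages yields sg_len = 1 + 3 + 3 = 7:
 *   sg[0]   rqst[0].rq_iov[0] + 20 (transform header minus the skipped bytes)
 *   sg[1]   rqst[0].rq_iov[1]
 *   sg[2]   rqst[0] page 0
 *   sg[3]   rqst[1].rq_iov[0]
 *   sg[4-5] rqst[1] pages 0-1
 *   sg[6]   the signature buffer passed in as @sign
 */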
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003271static struct scatterlist *
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003272init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003273{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003274 unsigned int sg_len;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003275 struct scatterlist *sg;
3276 unsigned int i;
3277 unsigned int j;
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003278 unsigned int idx = 0;
3279 int skip;
3280
3281 sg_len = 1;
3282 for (i = 0; i < num_rqst; i++)
3283 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003284
3285 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
3286 if (!sg)
3287 return NULL;
3288
3289 sg_init_table(sg, sg_len);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003290 for (i = 0; i < num_rqst; i++) {
3291 for (j = 0; j < rqst[i].rq_nvec; j++) {
3292 /*
3293 * The first rqst has a transform header where the
3294 * first 20 bytes are not part of the encrypted blob
3295 */
3296 skip = (i == 0) && (j == 0) ? 20 : 0;
3297 smb2_sg_set_buf(&sg[idx++],
3298 rqst[i].rq_iov[j].iov_base + skip,
3299 rqst[i].rq_iov[j].iov_len - skip);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10003300 }
Steve Frenchd5f07fb2018-06-05 17:46:24 -05003301
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003302 for (j = 0; j < rqst[i].rq_npages; j++) {
3303 unsigned int len, offset;
3304
3305 rqst_page_get_length(&rqst[i], j, &len, &offset);
3306 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
3307 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003308 }
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003309 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003310 return sg;
3311}
3312
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003313static int
3314smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3315{
3316 struct cifs_ses *ses;
3317 u8 *ses_enc_key;
3318
3319 spin_lock(&cifs_tcp_ses_lock);
3320 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3321 if (ses->Suid != ses_id)
3322 continue;
3323 ses_enc_key = enc ? ses->smb3encryptionkey :
3324 ses->smb3decryptionkey;
3325 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3326 spin_unlock(&cifs_tcp_ses_lock);
3327 return 0;
3328 }
3329 spin_unlock(&cifs_tcp_ses_lock);
3330
3331 return 1;
3332}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003333/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003334 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3335 * iov[0] - transform header (associate data),
3336 * iov[1-N] - SMB2 header and pages - data to encrypt.
3337 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003338 * untouched.
3339 */
3340static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003341crypt_message(struct TCP_Server_Info *server, int num_rqst,
3342 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003343{
3344 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003345 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003346 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003347 int rc = 0;
3348 struct scatterlist *sg;
3349 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003350 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003351 struct aead_request *req;
3352 char *iv;
3353 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003354 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003355 struct crypto_aead *tfm;
3356 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3357
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003358 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3359 if (rc) {
3360 cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
3361 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003362 return 0;
3363 }
3364
3365 rc = smb3_crypto_aead_allocate(server);
3366 if (rc) {
3367 cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
3368 return rc;
3369 }
3370
3371 tfm = enc ? server->secmech.ccmaesencrypt :
3372 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003373 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003374 if (rc) {
3375 cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
3376 return rc;
3377 }
3378
3379 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3380 if (rc) {
3381 cifs_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
3382 return rc;
3383 }
3384
3385 req = aead_request_alloc(tfm, GFP_KERNEL);
3386 if (!req) {
Christoph Probsta205d502019-05-08 21:36:25 +02003387 cifs_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003388 return -ENOMEM;
3389 }
3390
3391 if (!enc) {
3392 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3393 crypt_len += SMB2_SIGNATURE_SIZE;
3394 }
3395
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003396 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003397 if (!sg) {
Christoph Probsta205d502019-05-08 21:36:25 +02003398 cifs_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003399 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003400 goto free_req;
3401 }
3402
3403 iv_len = crypto_aead_ivsize(tfm);
3404 iv = kzalloc(iv_len, GFP_KERNEL);
3405 if (!iv) {
Christoph Probsta205d502019-05-08 21:36:25 +02003406 cifs_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003407 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003408 goto free_sg;
3409 }
3410 iv[0] = 3;
3411 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
3412
3413 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3414 aead_request_set_ad(req, assoc_data_len);
3415
3416 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003417 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003418
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003419 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3420 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003421
3422 if (!rc && enc)
3423 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3424
3425 kfree(iv);
3426free_sg:
3427 kfree(sg);
3428free_req:
3429 kfree(req);
3430 return rc;
3431}
3432
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003433void
3434smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003435{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003436 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003437
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003438 for (i = 0; i < num_rqst; i++) {
3439 if (rqst[i].rq_pages) {
3440 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3441 put_page(rqst[i].rq_pages[j]);
3442 kfree(rqst[i].rq_pages);
3443 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003444 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003445}
3446
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003447/*
3448 * This function will initialize new_rq and encrypt the content.
3449 * The first entry, new_rq[0], only contains a single iov which contains
3450 * a smb2_transform_hdr and is pre-allocated by the caller.
 3451 * This function then populates new_rq[1+] with the content from old_rq[0+].
3452 *
3453 * The end result is an array of smb_rqst structures where the first structure
3454 * only contains a single iov for the transform header which we then can pass
3455 * to crypt_message().
3456 *
3457 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3458 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3459 */
3460static int
3461smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3462 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003463{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003464 struct page **pages;
3465 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3466 unsigned int npages;
3467 unsigned int orig_len = 0;
3468 int i, j;
3469 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003470
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003471 for (i = 1; i < num_rqst; i++) {
3472 npages = old_rq[i - 1].rq_npages;
3473 pages = kmalloc_array(npages, sizeof(struct page *),
3474 GFP_KERNEL);
3475 if (!pages)
3476 goto err_free;
3477
3478 new_rq[i].rq_pages = pages;
3479 new_rq[i].rq_npages = npages;
3480 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3481 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3482 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3483 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3484 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3485
3486 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3487
3488 for (j = 0; j < npages; j++) {
3489 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3490 if (!pages[j])
3491 goto err_free;
3492 }
3493
 3494		/* copy pages from the old request */
3495 for (j = 0; j < npages; j++) {
3496 char *dst, *src;
3497 unsigned int offset, len;
3498
3499 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3500
3501 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3502 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3503
3504 memcpy(dst, src, len);
3505 kunmap(new_rq[i].rq_pages[j]);
3506 kunmap(old_rq[i - 1].rq_pages[j]);
3507 }
3508 }
3509
3510 /* fill the 1st iov with a transform header */
3511 fill_transform_hdr(tr_hdr, orig_len, old_rq);
3512
3513 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02003514 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003515 if (rc)
3516 goto err_free;
3517
3518 return rc;
3519
3520err_free:
3521 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3522 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003523}
3524
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003525static int
3526smb3_is_transform_hdr(void *buf)
3527{
3528 struct smb2_transform_hdr *trhdr = buf;
3529
3530 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3531}
3532
3533static int
3534decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3535 unsigned int buf_data_size, struct page **pages,
3536 unsigned int npages, unsigned int page_data_size)
3537{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003538 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003539 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003540 int rc;
3541
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003542 iov[0].iov_base = buf;
3543 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3544 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3545 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003546
3547 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003548 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003549 rqst.rq_pages = pages;
3550 rqst.rq_npages = npages;
3551 rqst.rq_pagesz = PAGE_SIZE;
3552 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3553
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003554 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02003555 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003556
3557 if (rc)
3558 return rc;
3559
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003560 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003561
3562 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003563
3564 return rc;
3565}
3566
3567static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003568read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3569 unsigned int npages, unsigned int len)
3570{
3571 int i;
3572 int length;
3573
3574 for (i = 0; i < npages; i++) {
3575 struct page *page = pages[i];
3576 size_t n;
3577
3578 n = len;
3579 if (len >= PAGE_SIZE) {
3580 /* enough data to fill the page */
3581 n = PAGE_SIZE;
3582 len -= n;
3583 } else {
3584 zero_user(page, len, PAGE_SIZE - len);
3585 len = 0;
3586 }
Long Li1dbe3462018-05-30 12:47:55 -07003587 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003588 if (length < 0)
3589 return length;
3590 server->total_read += length;
3591 }
3592
3593 return 0;
3594}
3595
3596static int
3597init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3598 unsigned int cur_off, struct bio_vec **page_vec)
3599{
3600 struct bio_vec *bvec;
3601 int i;
3602
3603 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3604 if (!bvec)
3605 return -ENOMEM;
3606
3607 for (i = 0; i < npages; i++) {
3608 bvec[i].bv_page = pages[i];
3609 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3610 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3611 data_size -= bvec[i].bv_len;
3612 }
3613
3614 if (data_size != 0) {
3615 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3616 kfree(bvec);
3617 return -EIO;
3618 }
3619
3620 *page_vec = bvec;
3621 return 0;
3622}
3623
3624static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003625handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3626 char *buf, unsigned int buf_len, struct page **pages,
3627 unsigned int npages, unsigned int page_data_size)
3628{
3629 unsigned int data_offset;
3630 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003631 unsigned int cur_off;
3632 unsigned int cur_page_idx;
3633 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003634 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10003635 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003636 struct bio_vec *bvec = NULL;
3637 struct iov_iter iter;
3638 struct kvec iov;
3639 int length;
Long Li74dcf412017-11-22 17:38:46 -07003640 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003641
3642 if (shdr->Command != SMB2_READ) {
3643 cifs_dbg(VFS, "only big read responses are supported\n");
3644 return -ENOTSUPP;
3645 }
3646
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003647 if (server->ops->is_session_expired &&
3648 server->ops->is_session_expired(buf)) {
3649 cifs_reconnect(server);
3650 wake_up(&server->response_q);
3651 return -1;
3652 }
3653
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003654 if (server->ops->is_status_pending &&
Pavel Shilovsky66265f12019-01-23 17:11:16 -08003655 server->ops->is_status_pending(buf, server))
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003656 return -1;
3657
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003658 /* set up first two iov to get credits */
3659 rdata->iov[0].iov_base = buf;
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003660 rdata->iov[0].iov_len = 0;
3661 rdata->iov[1].iov_base = buf;
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003662 rdata->iov[1].iov_len =
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003663 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003664 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3665 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3666 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3667 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3668
3669 rdata->result = server->ops->map_error(buf, true);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003670 if (rdata->result != 0) {
3671 cifs_dbg(FYI, "%s: server returned error %d\n",
3672 __func__, rdata->result);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003673 /* normal error on read response */
3674 dequeue_mid(mid, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003675 return 0;
3676 }
3677
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003678 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07003679#ifdef CONFIG_CIFS_SMB_DIRECT
3680 use_rdma_mr = rdata->mr;
3681#endif
3682 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003683
3684 if (data_offset < server->vals->read_rsp_size) {
3685 /*
3686 * win2k8 sometimes sends an offset of 0 when the read
3687 * is beyond the EOF. Treat it as if the data starts just after
3688 * the header.
3689 */
3690 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
3691 __func__, data_offset);
3692 data_offset = server->vals->read_rsp_size;
3693 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
3694 /* data_offset is beyond the end of smallbuf */
3695 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
3696 __func__, data_offset);
3697 rdata->result = -EIO;
3698 dequeue_mid(mid, rdata->result);
3699 return 0;
3700 }
3701
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003702 pad_len = data_offset - server->vals->read_rsp_size;
3703
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003704 if (buf_len <= data_offset) {
3705 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003706 cur_page_idx = pad_len / PAGE_SIZE;
3707 cur_off = pad_len % PAGE_SIZE;
3708
3709 if (cur_page_idx != 0) {
3710 /* data offset is beyond the 1st page of response */
3711 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
3712 __func__, data_offset);
3713 rdata->result = -EIO;
3714 dequeue_mid(mid, rdata->result);
3715 return 0;
3716 }
3717
3718 if (data_len > page_data_size - pad_len) {
3719 /* data_len is corrupt -- discard frame */
3720 rdata->result = -EIO;
3721 dequeue_mid(mid, rdata->result);
3722 return 0;
3723 }
3724
3725 rdata->result = init_read_bvec(pages, npages, page_data_size,
3726 cur_off, &bvec);
3727 if (rdata->result != 0) {
3728 dequeue_mid(mid, rdata->result);
3729 return 0;
3730 }
3731
David Howellsaa563d72018-10-20 00:57:56 +01003732 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003733 } else if (buf_len >= data_offset + data_len) {
3734 /* read response payload is in buf */
3735 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
3736 iov.iov_base = buf + data_offset;
3737 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01003738 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003739 } else {
3740 /* read response payload cannot be in both buf and pages */
3741 WARN_ONCE(1, "buf can not contain only a part of read data");
3742 rdata->result = -EIO;
3743 dequeue_mid(mid, rdata->result);
3744 return 0;
3745 }
3746
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003747 length = rdata->copy_into_pages(server, rdata, &iter);
3748
3749 kfree(bvec);
3750
3751 if (length < 0)
3752 return length;
3753
3754 dequeue_mid(mid, false);
3755 return length;
3756}
3757
3758static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003759receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
3760{
3761 char *buf = server->smallbuf;
3762 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3763 unsigned int npages;
3764 struct page **pages;
3765 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003766 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003767 int rc;
3768 int i = 0;
3769
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003770 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003771 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
3772
3773 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
3774 if (rc < 0)
3775 return rc;
3776 server->total_read += rc;
3777
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003778 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11003779 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003780 npages = DIV_ROUND_UP(len, PAGE_SIZE);
3781
3782 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
3783 if (!pages) {
3784 rc = -ENOMEM;
3785 goto discard_data;
3786 }
3787
3788 for (; i < npages; i++) {
3789 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3790 if (!pages[i]) {
3791 rc = -ENOMEM;
3792 goto discard_data;
3793 }
3794 }
3795
 3796	/* read the read-response data into pages */
3797 rc = read_data_into_pages(server, pages, npages, len);
3798 if (rc)
3799 goto free_pages;
3800
Pavel Shilovsky350be252017-04-10 10:31:33 -07003801 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003802 if (rc)
3803 goto free_pages;
3804
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003805 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003806 pages, npages, len);
3807 if (rc)
3808 goto free_pages;
3809
3810 *mid = smb2_find_mid(server, buf);
3811 if (*mid == NULL)
3812 cifs_dbg(FYI, "mid not found\n");
3813 else {
3814 cifs_dbg(FYI, "mid found\n");
3815 (*mid)->decrypted = true;
3816 rc = handle_read_data(server, *mid, buf,
3817 server->vals->read_rsp_size,
3818 pages, npages, len);
3819 }
3820
3821free_pages:
3822 for (i = i - 1; i >= 0; i--)
3823 put_page(pages[i]);
3824 kfree(pages);
3825 return rc;
3826discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07003827 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003828 goto free_pages;
3829}
3830
3831static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003832receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003833 struct mid_q_entry **mids, char **bufs,
3834 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003835{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003836 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003837 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003838 char *tmpbuf;
3839 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003840 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003841 unsigned int buf_size;
3842 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003843 int next_is_large;
3844 char *next_buffer = NULL;
3845
3846 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003847
3848 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003849 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003850 server->large_buf = true;
3851 memcpy(server->bigbuf, buf, server->total_read);
3852 buf = server->bigbuf;
3853 }
3854
3855 /* now read the rest */
3856 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003857 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003858 if (length < 0)
3859 return length;
3860 server->total_read += length;
3861
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003862 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003863 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
3864 if (length)
3865 return length;
3866
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003867 next_is_large = server->large_buf;
3868 one_more:
3869 shdr = (struct smb2_sync_hdr *)buf;
3870 if (shdr->NextCommand) {
3871 if (next_is_large) {
3872 tmpbuf = server->bigbuf;
3873 next_buffer = (char *)cifs_buf_get();
3874 } else {
3875 tmpbuf = server->smallbuf;
3876 next_buffer = (char *)cifs_small_buf_get();
3877 }
3878 memcpy(next_buffer,
3879 tmpbuf + le32_to_cpu(shdr->NextCommand),
3880 pdu_length - le32_to_cpu(shdr->NextCommand));
3881 }
3882
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003883 mid_entry = smb2_find_mid(server, buf);
3884 if (mid_entry == NULL)
3885 cifs_dbg(FYI, "mid not found\n");
3886 else {
3887 cifs_dbg(FYI, "mid found\n");
3888 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003889 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003890 }
3891
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003892 if (*num_mids >= MAX_COMPOUND) {
3893 cifs_dbg(VFS, "too many PDUs in compound\n");
3894 return -1;
3895 }
3896 bufs[*num_mids] = buf;
3897 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003898
3899 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003900 ret = mid_entry->handle(server, mid_entry);
3901 else
3902 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003903
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003904 if (ret == 0 && shdr->NextCommand) {
3905 pdu_length -= le32_to_cpu(shdr->NextCommand);
3906 server->large_buf = next_is_large;
3907 if (next_is_large)
3908 server->bigbuf = next_buffer;
3909 else
3910 server->smallbuf = next_buffer;
3911
3912 buf += le32_to_cpu(shdr->NextCommand);
3913 goto one_more;
3914 }
3915
3916 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003917}
3918
3919static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003920smb3_receive_transform(struct TCP_Server_Info *server,
3921 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003922{
3923 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003924 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003925 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3926 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3927
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003928 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003929 sizeof(struct smb2_sync_hdr)) {
3930 cifs_dbg(VFS, "Transform message is too small (%u)\n",
3931 pdu_length);
3932 cifs_reconnect(server);
3933 wake_up(&server->response_q);
3934 return -ECONNABORTED;
3935 }
3936
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003937 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003938 cifs_dbg(VFS, "Transform message is broken\n");
3939 cifs_reconnect(server);
3940 wake_up(&server->response_q);
3941 return -ECONNABORTED;
3942 }
3943
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003944 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08003945 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
3946 *num_mids = 1;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003947 return receive_encrypted_read(server, &mids[0]);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08003948 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003949
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003950 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003951}
3952
3953int
3954smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
3955{
3956 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
3957
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003958 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003959 NULL, 0, 0);
3960}
3961
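/*
 * Return how far to advance from the start of this frame to find the next
 * one: for an encrypted frame, the transform header size plus
 * OriginalMessageSize; for plaintext, the NextCommand offset, which is 0
 * when this is the last (or only) PDU in a compound.
 */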
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003962static int
3963smb2_next_header(char *buf)
3964{
3965 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
3966 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
3967
3968 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
3969 return sizeof(struct smb2_transform_hdr) +
3970 le32_to_cpu(t_hdr->OriginalMessageSize);
3971
3972 return le32_to_cpu(hdr->NextCommand);
3973}
3974
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05003975static int
3976smb2_make_node(unsigned int xid, struct inode *inode,
3977 struct dentry *dentry, struct cifs_tcon *tcon,
3978 char *full_path, umode_t mode, dev_t dev)
3979{
3980 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3981 int rc = -EPERM;
3982 int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
3983 FILE_ALL_INFO *buf = NULL;
3984 struct cifs_io_parms io_parms;
3985 __u32 oplock = 0;
3986 struct cifs_fid fid;
3987 struct cifs_open_parms oparms;
3988 unsigned int bytes_written;
3989 struct win_dev *pdev;
3990 struct kvec iov[2];
3991
3992 /*
 3993	 * Check if mounted with the 'sfu' mount parm.
 3994	 * SFU emulation should work with all servers, but only
 3995	 * supports block and char devices (no sockets or fifos),
 3996	 * and was used by default in earlier versions of Windows.
3997 */
3998 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
3999 goto out;
4000
4001 /*
4002 * TODO: Add ability to create instead via reparse point. Windows (e.g.
4003 * their current NFS server) uses this approach to expose special files
4004 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
4005 */
4006
4007 if (!S_ISCHR(mode) && !S_ISBLK(mode))
4008 goto out;
4009
4010 cifs_dbg(FYI, "sfu compat create special file\n");
4011
4012 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
4013 if (buf == NULL) {
4014 rc = -ENOMEM;
4015 goto out;
4016 }
4017
4018 if (backup_cred(cifs_sb))
4019 create_options |= CREATE_OPEN_BACKUP_INTENT;
4020
4021 oparms.tcon = tcon;
4022 oparms.cifs_sb = cifs_sb;
4023 oparms.desired_access = GENERIC_WRITE;
4024 oparms.create_options = create_options;
4025 oparms.disposition = FILE_CREATE;
4026 oparms.path = full_path;
4027 oparms.fid = &fid;
4028 oparms.reconnect = false;
4029
4030 if (tcon->ses->server->oplocks)
4031 oplock = REQ_OPLOCK;
4032 else
4033 oplock = 0;
4034 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
4035 if (rc)
4036 goto out;
4037
4038 /*
4039 * BB Do not bother to decode buf since no local inode yet to put
4040 * timestamps in, but we can reuse it safely.
4041 */
4042
4043 pdev = (struct win_dev *)buf;
4044 io_parms.pid = current->tgid;
4045 io_parms.tcon = tcon;
4046 io_parms.offset = 0;
4047 io_parms.length = sizeof(struct win_dev);
4048 iov[1].iov_base = buf;
4049 iov[1].iov_len = sizeof(struct win_dev);
4050 if (S_ISCHR(mode)) {
4051 memcpy(pdev->type, "IntxCHR", 8);
4052 pdev->major = cpu_to_le64(MAJOR(dev));
4053 pdev->minor = cpu_to_le64(MINOR(dev));
4054 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4055 &bytes_written, iov, 1);
4056 } else if (S_ISBLK(mode)) {
4057 memcpy(pdev->type, "IntxBLK", 8);
4058 pdev->major = cpu_to_le64(MAJOR(dev));
4059 pdev->minor = cpu_to_le64(MINOR(dev));
4060 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4061 &bytes_written, iov, 1);
4062 }
4063 tcon->ses->server->ops->close(xid, tcon, &fid);
4064 d_drop(dentry);
4065
4066 /* FIXME: add code here to set EAs */
4067out:
4068 kfree(buf);
4069 return rc;
4070}
4071
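/*
 * With the 'sfu' emulation above, mknod on an SMB3 mount leaves a small
 * (sizeof(struct win_dev)) file on the server: an 8-byte tag ("IntxCHR" or
 * "IntxBLK", NUL padded) followed by the major and minor numbers as
 * little-endian 64-bit values, e.g. "mknod ... b 8 1" stores IntxBLK, 8, 1.
 * Other SFU-aware clients interpret such files as device nodes.
 */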
4072
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004073struct smb_version_operations smb20_operations = {
4074 .compare_fids = smb2_compare_fids,
4075 .setup_request = smb2_setup_request,
4076 .setup_async_request = smb2_setup_async_request,
4077 .check_receive = smb2_check_receive,
4078 .add_credits = smb2_add_credits,
4079 .set_credits = smb2_set_credits,
4080 .get_credits_field = smb2_get_credits_field,
4081 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004082 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004083 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004084 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004085 .read_data_offset = smb2_read_data_offset,
4086 .read_data_length = smb2_read_data_length,
4087 .map_error = map_smb2_to_linux_error,
4088 .find_mid = smb2_find_mid,
4089 .check_message = smb2_check_message,
4090 .dump_detail = smb2_dump_detail,
4091 .clear_stats = smb2_clear_stats,
4092 .print_stats = smb2_print_stats,
4093 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004094 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004095 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004096 .need_neg = smb2_need_neg,
4097 .negotiate = smb2_negotiate,
4098 .negotiate_wsize = smb2_negotiate_wsize,
4099 .negotiate_rsize = smb2_negotiate_rsize,
4100 .sess_setup = SMB2_sess_setup,
4101 .logoff = SMB2_logoff,
4102 .tree_connect = SMB2_tcon,
4103 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004104 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004105 .is_path_accessible = smb2_is_path_accessible,
4106 .can_echo = smb2_can_echo,
4107 .echo = SMB2_echo,
4108 .query_path_info = smb2_query_path_info,
4109 .get_srv_inum = smb2_get_srv_inum,
4110 .query_file_info = smb2_query_file_info,
4111 .set_path_size = smb2_set_path_size,
4112 .set_file_size = smb2_set_file_size,
4113 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004114 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004115 .mkdir = smb2_mkdir,
4116 .mkdir_setinfo = smb2_mkdir_setinfo,
4117 .rmdir = smb2_rmdir,
4118 .unlink = smb2_unlink,
4119 .rename = smb2_rename_path,
4120 .create_hardlink = smb2_create_hardlink,
4121 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01004122 .query_mf_symlink = smb3_query_mf_symlink,
4123 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004124 .open = smb2_open_file,
4125 .set_fid = smb2_set_fid,
4126 .close = smb2_close_file,
4127 .flush = smb2_flush_file,
4128 .async_readv = smb2_async_readv,
4129 .async_writev = smb2_async_writev,
4130 .sync_read = smb2_sync_read,
4131 .sync_write = smb2_sync_write,
4132 .query_dir_first = smb2_query_dir_first,
4133 .query_dir_next = smb2_query_dir_next,
4134 .close_dir = smb2_close_dir,
4135 .calc_smb_size = smb2_calc_size,
4136 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004137 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004138 .oplock_response = smb2_oplock_response,
4139 .queryfs = smb2_queryfs,
4140 .mand_lock = smb2_mand_lock,
4141 .mand_unlock_range = smb2_unlock_range,
4142 .push_mand_locks = smb2_push_mandatory_locks,
4143 .get_lease_key = smb2_get_lease_key,
4144 .set_lease_key = smb2_set_lease_key,
4145 .new_lease_key = smb2_new_lease_key,
4146 .calc_signature = smb2_calc_signature,
4147 .is_read_op = smb2_is_read_op,
4148 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004149 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004150 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004151 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004152 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004153 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004154 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304155 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004156#ifdef CONFIG_CIFS_XATTR
4157 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004158 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004159#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004160#ifdef CONFIG_CIFS_ACL
4161 .get_acl = get_smb2_acl,
4162 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004163 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004164#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004165 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004166 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004167 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004168 .fiemap = smb3_fiemap,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004169};
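
/*
 * Callers do not invoke these handlers directly; they dispatch through the
 * ops table hung off the connection, exactly as the make_node helper above
 * does with ->sync_write() and ->close().  A minimal illustrative call site
 * (variable names are only for illustration):
 *
 *	struct TCP_Server_Info *server = tcon->ses->server;
 *
 *	if (server->ops->flush)
 *		rc = server->ops->flush(xid, tcon, &cfile->fid);
 *
 * which is what lets the VFS-facing code stay unchanged across the 2.0,
 * 2.1, 3.0, 3.0.2 and 3.1.1 tables in this file.
 */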

struct smb_version_operations smb21_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb21_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.enum_snapshots = smb3_enum_snapshots,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
#ifdef CONFIG_CIFS_ACL
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
};
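
/*
 * Relative to smb20_operations, this 2.1 table switches .wait_mtu_credits
 * from the generic cifs_wait_mtu_credits() to smb2_wait_mtu_credits(),
 * gains .adjust_credits, uses the smb21_* oplock/lease variants
 * (downgrade_oplock, is_read_op, set_oplock_level) and adds
 * .enum_snapshots; the remaining entries are shared with the 2.0 table.
 */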

struct smb_version_operations smb30_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb30signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
#ifdef CONFIG_CIFS_ACL
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
};
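
/*
 * The 3.0 table layers the SMB3-only features on top of the 2.1 entries:
 * signing-key derivation (generate_smb30signingkey) paired with
 * smb3_calc_signature, secure-negotiate validation, copy offload via
 * duplicate_extents, fallocate, and the encrypted-transform hooks.  The
 * receive path consults those hooks roughly as follows (illustrative
 * sketch only, not the exact demultiplex code):
 *
 *	if (server->ops->is_transform_hdr &&
 *	    server->ops->is_transform_hdr(buf))
 *		length = server->ops->receive_transform(server, mids,
 *							bufs, &num_mids);
 *
 * with the decrypted buffers then flowing through the normal response
 * handling.
 */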

struct smb_version_operations smb311_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.posix_mkdir = smb311_posix_mkdir,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb311_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb311signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
#ifdef CONFIG_CIFS_ACL
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
};
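
/*
 * The 3.1.1 table adds the POSIX-extension entry points
 * (smb311_posix_mkdir, smb311_queryfs) and derives signing keys with
 * generate_smb311signingkey.  Note that .validate_negotiate is left
 * commented out above: 3.1.1 protects the negotiate exchange through its
 * own negotiate contexts / pre-auth integrity rather than the FSCTL-based
 * check wired into the 3.0 table.
 */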

struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
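
/*
 * smb20_values and the tables that follow carry per-dialect constants
 * rather than behaviour: the protocol ID advertised during negotiate, the
 * header and read-response sizes used when validating lengths, the lock
 * flag encodings, the signing capability bits, and how large a lease
 * create context to reserve (create_lease for 2.0/2.1, create_lease_v2
 * for the 3.x dialects).
 */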

struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};

struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
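
/*
 * smb3any_values and smbdefault_values back the multi-dialect negotiate
 * paths (for example "vers=3" and mounts with no explicit vers= option):
 * the protocol_id recorded here is only a placeholder, since the client
 * sends an array of dialects and adopts whichever one the server picks,
 * after which the connection is re-pointed at the matching single-dialect
 * values/operations pair.
 */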

struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
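
/*
 * Across the values tables above, the lock-flag encodings, header sizes
 * and signing masks are identical for every SMB2+ dialect; what actually
 * distinguishes them is the version string, the protocol ID, the
 * capabilities requested at negotiate time, and the size of the lease
 * create context (v1 for 2.0/2.1, v2 for 3.x).
 */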