/*
 * SMB2 version specific operations
 *
 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2 as published
 * by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pagemap.h>
#include <linux/vfs.h>
#include <linux/falloc.h>
#include <linux/scatterlist.h>
#include <linux/uuid.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
#include "smbdirect.h"

/* Change credits for different ops and return the total number of credits */
static int
change_conf(struct TCP_Server_Info *server)
{
        server->credits += server->echo_credits + server->oplock_credits;
        server->oplock_credits = server->echo_credits = 0;
        switch (server->credits) {
        case 0:
                return 0;
        case 1:
                server->echoes = false;
                server->oplocks = false;
                break;
        case 2:
                server->echoes = true;
                server->oplocks = false;
                server->echo_credits = 1;
                break;
        default:
                server->echoes = true;
                if (enable_oplocks) {
                        server->oplocks = true;
                        server->oplock_credits = 1;
                } else
                        server->oplocks = false;

                server->echo_credits = 1;
        }
        server->credits -= server->echo_credits + server->oplock_credits;
        return server->credits + server->echo_credits + server->oplock_credits;
}

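/*
 * Return credits granted by a response to the server's pool. Credits that
 * belong to a previous (pre-reconnect) server instance are dropped, and the
 * echo/oplock credits are rebalanced via change_conf() once no other
 * requests are in flight.
 */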
static void
smb2_add_credits(struct TCP_Server_Info *server,
                 const struct cifs_credits *credits, const int optype)
{
        int *val, rc = -1;
        unsigned int add = credits->value;
        unsigned int instance = credits->instance;
        bool reconnect_detected = false;

        spin_lock(&server->req_lock);
        val = server->ops->get_credits_field(server, optype);

        /* eg found case where write overlapping reconnect messed up credits */
        if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
                trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
                        server->hostname, *val);
        if ((instance == 0) || (instance == server->reconnect_instance))
                *val += add;
        else
                reconnect_detected = true;

        if (*val > 65000) {
                *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
                printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
        }
        server->in_flight--;
        if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
                rc = change_conf(server);
        /*
         * Sometimes server returns 0 credits on oplock break ack - we need to
         * rebalance credits in this case.
         */
        else if (server->in_flight > 0 && server->oplock_credits == 0 &&
                 server->oplocks) {
                if (server->credits > 1) {
                        server->credits--;
                        server->oplock_credits++;
                }
        }
        spin_unlock(&server->req_lock);
        wake_up(&server->request_q);

        if (reconnect_detected)
                cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
                         add, instance);

        if (server->tcpStatus == CifsNeedReconnect
            || server->tcpStatus == CifsExiting)
                return;

        switch (rc) {
        case -1:
                /* change_conf hasn't been executed */
                break;
        case 0:
                cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
                break;
        case 1:
                cifs_dbg(VFS, "disabling echoes and oplocks\n");
                break;
        case 2:
                cifs_dbg(FYI, "disabling oplocks\n");
                break;
        default:
                cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
        }
}

static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
        spin_lock(&server->req_lock);
        server->credits = val;
        if (val == 1)
                server->reconnect_instance++;
        spin_unlock(&server->req_lock);
        /* don't log while holding the lock */
        if (val == 1)
                cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}

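/*
 * Echo and oplock-break requests draw from their own small credit counters;
 * every other operation uses the shared pool.
 */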
static int *
smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
        switch (optype) {
        case CIFS_ECHO_OP:
                return &server->echo_credits;
        case CIFS_OBREAK_OP:
                return &server->oplock_credits;
        default:
                return &server->credits;
        }
}

static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
        struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;

        if (mid->mid_state == MID_RESPONSE_RECEIVED
            || mid->mid_state == MID_RESPONSE_MALFORMED)
                return le16_to_cpu(shdr->CreditRequest);

        return 0;
}

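/*
 * Block until enough credits are available for a large (multi-credit) read
 * or write, while keeping a few credits in reserve for reopen and other
 * small operations.
 */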
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        int rc = 0;
        unsigned int scredits;

        spin_lock(&server->req_lock);
        while (1) {
                if (server->credits <= 0) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable(server->request_q,
                                has_credits(server, &server->credits, 1));
                        cifs_num_waiters_dec(server);
                        if (rc)
                                return rc;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        scredits = server->credits;
                        /* can deadlock with reopen */
                        if (scredits <= 8) {
                                *num = SMB2_MAX_BUFFER_SIZE;
                                credits->value = 0;
                                credits->instance = 0;
                                break;
                        }

                        /* leave some credits for reopen and other ops */
                        scredits -= 8;
                        *num = min_t(unsigned int, size,
                                     scredits * SMB2_MAX_BUFFER_SIZE);

                        credits->value =
                                DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
                        credits->instance = server->reconnect_instance;
                        server->credits -= credits->value;
                        server->in_flight++;
                        break;
                }
        }
        spin_unlock(&server->req_lock);
        return rc;
}

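/*
 * A request may end up needing fewer credits than were reserved for it;
 * return the surplus to the pool, unless the reservation belongs to an old
 * server instance.
 */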
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
                    struct cifs_credits *credits,
                    const unsigned int payload_size)
{
        int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

        if (!credits->value || credits->value == new_val)
                return 0;

        if (credits->value < new_val) {
                WARN_ONCE(1, "request has less credits (%d) than required (%d)",
                          credits->value, new_val);
                return -ENOTSUPP;
        }

        spin_lock(&server->req_lock);

        if (server->reconnect_instance != credits->instance) {
                spin_unlock(&server->req_lock);
                cifs_dbg(VFS, "trying to return %d credits to old session\n",
                         credits->value - new_val);
                return -EAGAIN;
        }

        server->credits += credits->value - new_val;
        spin_unlock(&server->req_lock);
        wake_up(&server->request_q);
        credits->value = new_val;
        return 0;
}

static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
        __u64 mid;
        /* for SMB2 we need the current value */
        spin_lock(&GlobalMid_Lock);
        mid = server->CurrentMid++;
        spin_unlock(&GlobalMid_Lock);
        return mid;
}

static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
        spin_lock(&GlobalMid_Lock);
        if (server->CurrentMid >= val)
                server->CurrentMid -= val;
        spin_unlock(&GlobalMid_Lock);
}

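/*
 * Look up the mid_q_entry matching an incoming response by MessageId and
 * command; takes an extra reference on the entry that the caller must drop.
 */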
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
        struct mid_q_entry *mid;
        struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
        __u64 wire_mid = le64_to_cpu(shdr->MessageId);

        if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
                cifs_dbg(VFS, "encrypted frame parsing not supported yet");
                return NULL;
        }

        spin_lock(&GlobalMid_Lock);
        list_for_each_entry(mid, &server->pending_mid_q, qhead) {
                if ((mid->mid == wire_mid) &&
                    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
                    (mid->command == shdr->Command)) {
                        kref_get(&mid->refcount);
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
        }
        spin_unlock(&GlobalMid_Lock);
        return NULL;
}

static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
        struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

        cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
                 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
                 shdr->ProcessId);
        cifs_dbg(VFS, "smb buf %p len %u\n", buf,
                 server->ops->calc_smb_size(buf, server));
#endif
}

static bool
smb2_need_neg(struct TCP_Server_Info *server)
{
        return server->max_read == 0;
}

static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
        int rc;
        ses->server->CurrentMid = 0;
        rc = SMB2_negotiate(xid, ses);
        /* BB we probably don't need to retry with modern servers */
        if (rc == -EAGAIN)
                rc = -EHOSTDOWN;
        return rc;
}

static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
        struct TCP_Server_Info *server = tcon->ses->server;
        unsigned int wsize;

        /* start with specified wsize, or default */
        wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
        wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
        if (server->rdma) {
                if (server->sign)
                        wsize = min_t(unsigned int,
                                wsize, server->smbd_conn->max_fragmented_send_size);
                else
                        wsize = min_t(unsigned int,
                                wsize, server->smbd_conn->max_readwrite_size);
        }
#endif
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

        return wsize;
}

static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
        struct TCP_Server_Info *server = tcon->ses->server;
        unsigned int wsize;

        /* start with specified wsize, or default */
        wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
        wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
        if (server->rdma) {
                if (server->sign)
                        wsize = min_t(unsigned int,
                                wsize, server->smbd_conn->max_fragmented_send_size);
                else
                        wsize = min_t(unsigned int,
                                wsize, server->smbd_conn->max_readwrite_size);
        }
#endif
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

        return wsize;
}

static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
        struct TCP_Server_Info *server = tcon->ses->server;
        unsigned int rsize;

        /* start with specified rsize, or default */
        rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
        rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
        if (server->rdma) {
                if (server->sign)
                        rsize = min_t(unsigned int,
                                rsize, server->smbd_conn->max_fragmented_recv_size);
                else
                        rsize = min_t(unsigned int,
                                rsize, server->smbd_conn->max_readwrite_size);
        }
#endif

        if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

        return rsize;
}

static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
        struct TCP_Server_Info *server = tcon->ses->server;
        unsigned int rsize;

        /* start with specified rsize, or default */
        rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
        rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
        if (server->rdma) {
                if (server->sign)
                        rsize = min_t(unsigned int,
                                rsize, server->smbd_conn->max_fragmented_recv_size);
                else
                        rsize = min_t(unsigned int,
                                rsize, server->smbd_conn->max_readwrite_size);
        }
#endif

        if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

        return rsize;
}

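/*
 * Parse the FSCTL_QUERY_NETWORK_INTERFACE_INFO response into an array of
 * cifs_server_iface entries: the first pass counts and sanity checks the
 * records, the second pass copies the IPv4/IPv6 entries that can be used.
 */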
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                        size_t buf_len,
                        struct cifs_server_iface **iface_list,
                        size_t *iface_count)
{
        struct network_interface_info_ioctl_rsp *p;
        struct sockaddr_in *addr4;
        struct sockaddr_in6 *addr6;
        struct iface_info_ipv4 *p4;
        struct iface_info_ipv6 *p6;
        struct cifs_server_iface *info;
        ssize_t bytes_left;
        size_t next = 0;
        int nb_iface = 0;
        int rc = 0;

        *iface_list = NULL;
        *iface_count = 0;

        /*
         * First pass: count and sanity check
         */

        bytes_left = buf_len;
        p = buf;
        while (bytes_left >= sizeof(*p)) {
                nb_iface++;
                next = le32_to_cpu(p->Next);
                if (!next) {
                        bytes_left -= sizeof(*p);
                        break;
                }
                p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
                bytes_left -= next;
        }

        if (!nb_iface) {
                cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
                rc = -EINVAL;
                goto out;
        }

        if (bytes_left || p->Next)
                cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);


        /*
         * Second pass: extract info to internal structure
         */

        *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
        if (!*iface_list) {
                rc = -ENOMEM;
                goto out;
        }

        info = *iface_list;
        bytes_left = buf_len;
        p = buf;
        while (bytes_left >= sizeof(*p)) {
                info->speed = le64_to_cpu(p->LinkSpeed);
                info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
                info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);

                cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
                cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
                cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
                         le32_to_cpu(p->Capability));

                switch (p->Family) {
                /*
                 * The kernel and wire socket structures have the same
                 * layout and use network byte order but make the
                 * conversion explicit in case either one changes.
                 */
                case INTERNETWORK:
                        addr4 = (struct sockaddr_in *)&info->sockaddr;
                        p4 = (struct iface_info_ipv4 *)p->Buffer;
                        addr4->sin_family = AF_INET;
                        memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

                        /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
                        addr4->sin_port = cpu_to_be16(CIFS_PORT);

                        cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
                                 &addr4->sin_addr);
                        break;
                case INTERNETWORKV6:
                        addr6 = (struct sockaddr_in6 *)&info->sockaddr;
                        p6 = (struct iface_info_ipv6 *)p->Buffer;
                        addr6->sin6_family = AF_INET6;
                        memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

                        /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
                        addr6->sin6_flowinfo = 0;
                        addr6->sin6_scope_id = 0;
                        addr6->sin6_port = cpu_to_be16(CIFS_PORT);

                        cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
                                 &addr6->sin6_addr);
                        break;
                default:
                        cifs_dbg(VFS,
                                 "%s: skipping unsupported socket family\n",
                                 __func__);
                        goto next_iface;
                }

                (*iface_count)++;
                info++;
next_iface:
                next = le32_to_cpu(p->Next);
                if (!next)
                        break;
                p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
                bytes_left -= next;
        }

        if (!*iface_count) {
                rc = -EINVAL;
                goto out;
        }

out:
        if (rc) {
                kfree(*iface_list);
                *iface_count = 0;
                *iface_list = NULL;
        }
        return rc;
}

static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
        int rc;
        unsigned int ret_data_len = 0;
        struct network_interface_info_ioctl_rsp *out_buf = NULL;
        struct cifs_server_iface *iface_list;
        size_t iface_count;
        struct cifs_ses *ses = tcon->ses;

        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
                        CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
        if (rc == -EOPNOTSUPP) {
                cifs_dbg(FYI,
                         "server does not support query network interfaces\n");
                goto out;
        } else if (rc != 0) {
                cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
                goto out;
        }

        rc = parse_server_interfaces(out_buf, ret_data_len,
                                     &iface_list, &iface_count);
        if (rc)
                goto out;

        spin_lock(&ses->iface_lock);
        kfree(ses->iface_list);
        ses->iface_list = iface_list;
        ses->iface_count = iface_count;
        ses->iface_last_update = jiffies;
        spin_unlock(&ses->iface_lock);

out:
        kfree(out_buf);
        return rc;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
        struct cached_fid *cfid = container_of(ref, struct cached_fid,
                                               refcount);

        if (cfid->is_valid) {
                cifs_dbg(FYI, "clear cached root file handle\n");
                SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
                           cfid->fid->volatile_fid);
                cfid->is_valid = false;
                cfid->file_all_info_is_valid = false;
        }
}

void close_shroot(struct cached_fid *cfid)
{
        mutex_lock(&cfid->fid_mutex);
        kref_put(&cfid->refcount, smb2_close_cached_fid);
        mutex_unlock(&cfid->fid_mutex);
}

void
smb2_cached_lease_break(struct work_struct *work)
{
        struct cached_fid *cfid = container_of(work,
                                               struct cached_fid, lease_break);

        close_shroot(cfid);
}

/*
 * Open the directory at the root of a share
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
        struct cifs_ses *ses = tcon->ses;
        struct TCP_Server_Info *server = ses->server;
        struct cifs_open_parms oparms;
        struct smb2_create_rsp *o_rsp = NULL;
        struct smb2_query_info_rsp *qi_rsp = NULL;
        int resp_buftype[2];
        struct smb_rqst rqst[2];
        struct kvec rsp_iov[2];
        struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
        struct kvec qi_iov[1];
        int rc, flags = 0;
        __le16 utf16_path = 0; /* Null - since an open of top of share */
        u8 oplock = SMB2_OPLOCK_LEVEL_II;

        mutex_lock(&tcon->crfid.fid_mutex);
        if (tcon->crfid.is_valid) {
                cifs_dbg(FYI, "found a cached root file handle\n");
                memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
                kref_get(&tcon->crfid.refcount);
                mutex_unlock(&tcon->crfid.fid_mutex);
                return 0;
        }

        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;

        memset(rqst, 0, sizeof(rqst));
        resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
        memset(rsp_iov, 0, sizeof(rsp_iov));

        /* Open */
        memset(&open_iov, 0, sizeof(open_iov));
        rqst[0].rq_iov = open_iov;
        rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

        oparms.tcon = tcon;
        oparms.create_options = 0;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
        oparms.fid = pfid;
        oparms.reconnect = false;

        rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
        if (rc)
                goto oshr_exit;
        smb2_set_next_command(tcon, &rqst[0]);

        memset(&qi_iov, 0, sizeof(qi_iov));
        rqst[1].rq_iov = qi_iov;
        rqst[1].rq_nvec = 1;

        rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
                                  COMPOUND_FID, FILE_ALL_INFORMATION,
                                  SMB2_O_INFO_FILE, 0,
                                  sizeof(struct smb2_file_all_info) +
                                  PATH_MAX * 2, 0, NULL);
        if (rc)
                goto oshr_exit;

        smb2_set_related(&rqst[1]);

        rc = compound_send_recv(xid, ses, flags, 2, rqst,
                                resp_buftype, rsp_iov);
        if (rc)
                goto oshr_exit;

        o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
        oparms.fid->persistent_fid = o_rsp->PersistentFileId;
        oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
        oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

        memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
        tcon->crfid.tcon = tcon;
        tcon->crfid.is_valid = true;
        kref_init(&tcon->crfid.refcount);

        if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
                kref_get(&tcon->crfid.refcount);
                oplock = smb2_parse_lease_state(server, o_rsp,
                                                &oparms.fid->epoch,
                                                oparms.fid->lease_key);
        } else
                goto oshr_exit;

        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
                goto oshr_exit;
        if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
                                (char *)&tcon->crfid.file_all_info))
                tcon->crfid.file_all_info_is_valid = 1;

 oshr_exit:
        mutex_unlock(&tcon->crfid.fid_mutex);
        SMB2_open_free(&rqst[0]);
        SMB2_query_info_free(&rqst[1]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        return rc;
}

static void
smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
        int rc;
        __le16 srch_path = 0; /* Null - open root of share */
        u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
        bool no_cached_open = tcon->nohandlecache;

        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
        oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;

        if (no_cached_open)
                rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
                               NULL);
        else
                rc = open_shroot(xid, tcon, &fid);

        if (rc)
                return;

        SMB3_request_interfaces(xid, tcon);

        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_ATTRIBUTE_INFORMATION);
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_DEVICE_INFORMATION);
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_VOLUME_INFORMATION);
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
        if (no_cached_open)
                SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
        else
                close_shroot(&tcon->crfid);

        return;
}

static void
smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
        int rc;
        __le16 srch_path = 0; /* Null - open root of share */
        u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
        struct cifs_open_parms oparms;
        struct cifs_fid fid;

        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
        oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;

        rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
        if (rc)
                return;

        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_ATTRIBUTE_INFORMATION);
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_DEVICE_INFORMATION);
        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
        return;
}

static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
                        struct cifs_sb_info *cifs_sb, const char *full_path)
{
        int rc;
        __le16 *utf16_path;
        __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
        struct cifs_open_parms oparms;
        struct cifs_fid fid;

        if ((*full_path == 0) && tcon->crfid.is_valid)
                return 0;

        utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
        if (!utf16_path)
                return -ENOMEM;

        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
        if (backup_cred(cifs_sb))
                oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
        else
                oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;

        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
        if (rc) {
                kfree(utf16_path);
                return rc;
        }

        rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
        kfree(utf16_path);
        return rc;
}

static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
                  struct cifs_sb_info *cifs_sb, const char *full_path,
                  u64 *uniqueid, FILE_ALL_INFO *data)
{
        *uniqueid = le64_to_cpu(data->IndexNumber);
        return 0;
}

static int
smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
                     struct cifs_fid *fid, FILE_ALL_INFO *data)
{
        int rc;
        struct smb2_file_all_info *smb2_data;

        smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;

        rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
                             smb2_data);
        if (!rc)
                move_smb2_info_to_cifs(data, smb2_data);
        kfree(smb2_data);
        return rc;
}

#ifdef CONFIG_CIFS_XATTR
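/*
 * Convert an SMB2 FULL_EA_INFORMATION list into the flat "user.<name>"
 * format expected by listxattr callers, or return the value of a single
 * named EA when ea_name is set.
 */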
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
                     struct smb2_file_full_ea_info *src, size_t src_size,
                     const unsigned char *ea_name)
{
        int rc = 0;
        unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
        char *name, *value;
        size_t buf_size = dst_size;
        size_t name_len, value_len, user_name_len;

        while (src_size > 0) {
                name = &src->ea_data[0];
                name_len = (size_t)src->ea_name_length;
                value = &src->ea_data[src->ea_name_length + 1];
                value_len = (size_t)le16_to_cpu(src->ea_value_length);

                if (name_len == 0) {
                        break;
                }

                if (src_size < 8 + name_len + 1 + value_len) {
                        cifs_dbg(FYI, "EA entry goes beyond length of list\n");
                        rc = -EIO;
                        goto out;
                }

                if (ea_name) {
                        if (ea_name_len == name_len &&
                            memcmp(ea_name, name, name_len) == 0) {
                                rc = value_len;
                                if (dst_size == 0)
                                        goto out;
                                if (dst_size < value_len) {
                                        rc = -ERANGE;
                                        goto out;
                                }
                                memcpy(dst, value, value_len);
                                goto out;
                        }
                } else {
                        /* 'user.' plus a terminating null */
                        user_name_len = 5 + 1 + name_len;

                        if (buf_size == 0) {
                                /* skip copy - calc size only */
                                rc += user_name_len;
                        } else if (dst_size >= user_name_len) {
                                dst_size -= user_name_len;
                                memcpy(dst, "user.", 5);
                                dst += 5;
                                memcpy(dst, src->ea_data, name_len);
                                dst += name_len;
                                *dst = 0;
                                ++dst;
                                rc += user_name_len;
                        } else {
                                /* stop before overrun buffer */
                                rc = -ERANGE;
                                break;
                        }
                }

                if (!src->next_entry_offset)
                        break;

                if (src_size < le32_to_cpu(src->next_entry_offset)) {
                        /* stop before overrun buffer */
                        rc = -ERANGE;
                        break;
                }
                src_size -= le32_to_cpu(src->next_entry_offset);
                src = (void *)((char *)src +
                               le32_to_cpu(src->next_entry_offset));
        }

        /* didn't find the named attribute */
        if (ea_name)
                rc = -ENODATA;

out:
        return (ssize_t)rc;
}

static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
               const unsigned char *path, const unsigned char *ea_name,
               char *ea_data, size_t buf_size,
               struct cifs_sb_info *cifs_sb)
{
        int rc;
        __le16 *utf16_path;
        struct kvec rsp_iov = {NULL, 0};
        int buftype = CIFS_NO_BUFFER;
        struct smb2_query_info_rsp *rsp;
        struct smb2_file_full_ea_info *info = NULL;

        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
                return -ENOMEM;

        rc = smb2_query_info_compound(xid, tcon, utf16_path,
                                      FILE_READ_EA,
                                      FILE_FULL_EA_INFORMATION,
                                      SMB2_O_INFO_FILE,
                                      CIFSMaxBufSize -
                                      MAX_SMB2_CREATE_RESPONSE_SIZE -
                                      MAX_SMB2_CLOSE_RESPONSE_SIZE,
                                      &rsp_iov, &buftype, cifs_sb);
        if (rc) {
                /*
                 * If ea_name is NULL (listxattr) and there are no EAs,
                 * return 0 as it's not an error. Otherwise, the specified
                 * ea_name was not found.
                 */
                if (!ea_name && rc == -ENODATA)
                        rc = 0;
                goto qeas_exit;
        }

        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
        rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
                               le32_to_cpu(rsp->OutputBufferLength),
                               &rsp_iov,
                               sizeof(struct smb2_file_full_ea_info));
        if (rc)
                goto qeas_exit;

        info = (struct smb2_file_full_ea_info *)(
                        le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
        rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
                        le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
        kfree(utf16_path);
        free_rsp_buf(buftype, rsp_iov.iov_base);
        return rc;
}

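/*
 * Set an extended attribute using a single compounded open/set-info/close
 * request. A NULL ea_value is the removal path, which is first looked up so
 * that removing a non-existent EA can fail early with -ENODATA.
 */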
static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
            const char *path, const char *ea_name, const void *ea_value,
            const __u16 ea_value_len, const struct nls_table *nls_codepage,
            struct cifs_sb_info *cifs_sb)
{
        struct cifs_ses *ses = tcon->ses;
        __le16 *utf16_path = NULL;
        int ea_name_len = strlen(ea_name);
        int flags = 0;
        int len;
        struct smb_rqst rqst[3];
        int resp_buftype[3];
        struct kvec rsp_iov[3];
        struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
        struct cifs_open_parms oparms;
        __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
        struct cifs_fid fid;
        struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
        unsigned int size[1];
        void *data[1];
        struct smb2_file_full_ea_info *ea = NULL;
        struct kvec close_iov[1];
        int rc;

        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;

        if (ea_name_len > 255)
                return -EINVAL;

        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
                return -ENOMEM;

        memset(rqst, 0, sizeof(rqst));
        resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
        memset(rsp_iov, 0, sizeof(rsp_iov));

        if (ses->server->ops->query_all_EAs) {
                if (!ea_value) {
                        rc = ses->server->ops->query_all_EAs(xid, tcon, path,
                                                             ea_name, NULL, 0,
                                                             cifs_sb);
                        if (rc == -ENODATA)
                                goto sea_exit;
                }
        }

        /* Open */
        memset(&open_iov, 0, sizeof(open_iov));
        rqst[0].rq_iov = open_iov;
        rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

        memset(&oparms, 0, sizeof(oparms));
        oparms.tcon = tcon;
        oparms.desired_access = FILE_WRITE_EA;
        oparms.disposition = FILE_OPEN;
        if (backup_cred(cifs_sb))
                oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
        else
                oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;

        rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
        if (rc)
                goto sea_exit;
        smb2_set_next_command(tcon, &rqst[0]);


        /* Set Info */
        memset(&si_iov, 0, sizeof(si_iov));
        rqst[1].rq_iov = si_iov;
        rqst[1].rq_nvec = 1;

        len = sizeof(ea) + ea_name_len + ea_value_len + 1;
        ea = kzalloc(len, GFP_KERNEL);
        if (ea == NULL) {
                rc = -ENOMEM;
                goto sea_exit;
        }

        ea->ea_name_length = ea_name_len;
        ea->ea_value_length = cpu_to_le16(ea_value_len);
        memcpy(ea->ea_data, ea_name, ea_name_len + 1);
        memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

        size[0] = len;
        data[0] = ea;

        rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
                                COMPOUND_FID, current->tgid,
                                FILE_FULL_EA_INFORMATION,
                                SMB2_O_INFO_FILE, 0, data, size);
        smb2_set_next_command(tcon, &rqst[1]);
        smb2_set_related(&rqst[1]);


        /* Close */
        memset(&close_iov, 0, sizeof(close_iov));
        rqst[2].rq_iov = close_iov;
        rqst[2].rq_nvec = 1;
        rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
        smb2_set_related(&rqst[2]);

        rc = compound_send_recv(xid, ses, flags, 3, rqst,
                                resp_buftype, rsp_iov);

 sea_exit:
        kfree(ea);
        kfree(utf16_path);
        SMB2_open_free(&rqst[0]);
        SMB2_set_info_free(&rqst[1]);
        SMB2_close_free(&rqst[2]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
        return rc;
}
#endif

static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
        return server->echoes;
}

static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
        int i;
        for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
                atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
                atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
        }
}

static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
        seq_puts(m, "\n\tShare Capabilities:");
        if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
                seq_puts(m, " DFS,");
        if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
                seq_puts(m, " CONTINUOUS AVAILABILITY,");
        if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
                seq_puts(m, " SCALEOUT,");
        if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
                seq_puts(m, " CLUSTER,");
        if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
                seq_puts(m, " ASYMMETRIC,");
        if (tcon->capabilities == 0)
                seq_puts(m, " None");
        if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
                seq_puts(m, " Aligned,");
        if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
                seq_puts(m, " Partition Aligned,");
        if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
                seq_puts(m, " SSD,");
        if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
                seq_puts(m, " TRIM-support,");

        seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
        seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
        if (tcon->perf_sector_size)
                seq_printf(m, "\tOptimal sector size: 0x%x",
                           tcon->perf_sector_size);
        seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}

static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
        atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
        atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;

        /*
         * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
         * totals (requests sent) since those SMBs are per-session not per tcon
         */
        seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
                   (long long)(tcon->bytes_read),
                   (long long)(tcon->bytes_written));
        seq_printf(m, "\nOpen files: %d total (local), %d open on server",
                   atomic_read(&tcon->num_local_opens),
                   atomic_read(&tcon->num_remote_opens));
        seq_printf(m, "\nTreeConnects: %d total %d failed",
                   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
                   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
        seq_printf(m, "\nTreeDisconnects: %d total %d failed",
                   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
                   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
        seq_printf(m, "\nCreates: %d total %d failed",
                   atomic_read(&sent[SMB2_CREATE_HE]),
                   atomic_read(&failed[SMB2_CREATE_HE]));
        seq_printf(m, "\nCloses: %d total %d failed",
                   atomic_read(&sent[SMB2_CLOSE_HE]),
                   atomic_read(&failed[SMB2_CLOSE_HE]));
        seq_printf(m, "\nFlushes: %d total %d failed",
                   atomic_read(&sent[SMB2_FLUSH_HE]),
                   atomic_read(&failed[SMB2_FLUSH_HE]));
        seq_printf(m, "\nReads: %d total %d failed",
                   atomic_read(&sent[SMB2_READ_HE]),
                   atomic_read(&failed[SMB2_READ_HE]));
        seq_printf(m, "\nWrites: %d total %d failed",
                   atomic_read(&sent[SMB2_WRITE_HE]),
                   atomic_read(&failed[SMB2_WRITE_HE]));
        seq_printf(m, "\nLocks: %d total %d failed",
                   atomic_read(&sent[SMB2_LOCK_HE]),
                   atomic_read(&failed[SMB2_LOCK_HE]));
        seq_printf(m, "\nIOCTLs: %d total %d failed",
                   atomic_read(&sent[SMB2_IOCTL_HE]),
                   atomic_read(&failed[SMB2_IOCTL_HE]));
        seq_printf(m, "\nQueryDirectories: %d total %d failed",
                   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
                   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
        seq_printf(m, "\nChangeNotifies: %d total %d failed",
                   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
                   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
        seq_printf(m, "\nQueryInfos: %d total %d failed",
                   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
                   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
        seq_printf(m, "\nSetInfos: %d total %d failed",
                   atomic_read(&sent[SMB2_SET_INFO_HE]),
                   atomic_read(&failed[SMB2_SET_INFO_HE]));
        seq_printf(m, "\nOplockBreaks: %d sent %d failed",
                   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
                   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
}

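/*
 * Record a newly opened handle in the cifsFileInfo and apply the granted
 * oplock level to the inode's caching state.
 */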
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        cfile->fid.persistent_fid = fid->persistent_fid;
        cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
        cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
        server->ops->set_oplock_level(cinode, oplock, fid->epoch,
                                      &fid->purge_cache);
        cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
        memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}

static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
                struct cifs_fid *fid)
{
        SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}

static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid,
                     struct copychunk_ioctl *pcchunk)
{
        int rc;
        unsigned int ret_data_len;
        struct resume_key_req *res_key;

        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
                        NULL, 0 /* no input */, CIFSMaxBufSize,
                        (char **)&res_key, &ret_data_len);

        if (rc) {
                cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
                goto req_res_key_exit;
        }
        if (ret_data_len < sizeof(struct resume_key_req)) {
                cifs_dbg(VFS, "Invalid refcopy resume key length\n");
                rc = -EINVAL;
                goto req_res_key_exit;
        }
        memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
        kfree(res_key);
        return rc;
}

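/*
 * Back end for the passthrough query-info ioctl: compound an open, a
 * query-info (or a passthrough fsctl) and a close, then copy the result
 * back to user space.
 */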
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001317static int
1318smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001319 struct cifs_tcon *tcon,
1320 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001321 unsigned long p)
1322{
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001323 struct cifs_ses *ses = tcon->ses;
1324 char __user *arg = (char __user *)p;
1325 struct smb_query_info qi;
1326 struct smb_query_info __user *pqi;
1327 int rc = 0;
1328 int flags = 0;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001329 struct smb2_query_info_rsp *qi_rsp = NULL;
1330 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001331 void *buffer = NULL;
1332 struct smb_rqst rqst[3];
1333 int resp_buftype[3];
1334 struct kvec rsp_iov[3];
1335 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1336 struct cifs_open_parms oparms;
1337 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1338 struct cifs_fid fid;
1339 struct kvec qi_iov[1];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001340 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001341 struct kvec close_iov[1];
1342
1343 memset(rqst, 0, sizeof(rqst));
1344 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1345 memset(rsp_iov, 0, sizeof(rsp_iov));
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001346
1347 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
1348 return -EFAULT;
1349
1350 if (qi.output_buffer_length > 1024)
1351 return -EINVAL;
1352
1353 if (!ses || !(ses->server))
1354 return -EIO;
1355
1356 if (smb3_encryption_required(tcon))
1357 flags |= CIFS_TRANSFORM_REQ;
1358
1359 buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
1360 if (buffer == NULL)
1361 return -ENOMEM;
1362
1363 if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
1364 qi.output_buffer_length)) {
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001365 rc = -EFAULT;
1366 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001367 }
1368
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001369 /* Open */
1370 memset(&open_iov, 0, sizeof(open_iov));
1371 rqst[0].rq_iov = open_iov;
1372 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001373
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001374 memset(&oparms, 0, sizeof(oparms));
1375 oparms.tcon = tcon;
1376 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
1377 oparms.disposition = FILE_OPEN;
1378 if (is_dir)
1379 oparms.create_options = CREATE_NOT_FILE;
1380 else
1381 oparms.create_options = CREATE_NOT_DIR;
1382 oparms.fid = &fid;
1383 oparms.reconnect = false;
1384
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001385 /*
1386 * FSCTL codes encode the special access they need in the fsctl code.
1387 */
1388 if (qi.flags & PASSTHRU_FSCTL) {
1389 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1390 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1391 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
1392 ;
1393 break;
1394 }
1395 }
1396
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001397 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
1398 if (rc)
1399 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001400 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001401
1402 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001403 if (qi.flags & PASSTHRU_FSCTL) {
1404 /* Can eventually relax perm check since server enforces too */
1405 if (!capable(CAP_SYS_ADMIN))
1406 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001407 else {
1408 memset(&io_iov, 0, sizeof(io_iov));
1409 rqst[1].rq_iov = io_iov;
1410 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1411
1412 rc = SMB2_ioctl_init(tcon, &rqst[1],
1413 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001414 qi.info_type, true, buffer,
1415 qi.output_buffer_length,
1416 CIFSMaxBufSize);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001417 }
Steve French31ba4332019-03-13 02:40:07 -05001418 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1419 memset(&qi_iov, 0, sizeof(qi_iov));
1420 rqst[1].rq_iov = qi_iov;
1421 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001422
Steve French31ba4332019-03-13 02:40:07 -05001423 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
1424 COMPOUND_FID, qi.file_info_class,
1425 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001426 qi.input_buffer_length,
1427 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001428 } else { /* unknown flags */
1429 cifs_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
1430 rc = -EINVAL;
1431 }
1432
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001433 if (rc)
1434 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001435 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001436 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001437
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001438 /* Close */
1439 memset(&close_iov, 0, sizeof(close_iov));
1440 rqst[2].rq_iov = close_iov;
1441 rqst[2].rq_nvec = 1;
1442
1443 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001444 if (rc)
1445 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001446 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001447
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001448 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1449 resp_buftype, rsp_iov);
1450 if (rc)
1451 goto iqinf_exit;
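	/*
	 * Copy the result back to the caller: input_buffer_length is reused
	 * to return how many bytes of response data follow the
	 * smb_query_info header in the user buffer.
	 */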
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001452 if (qi.flags & PASSTHRU_FSCTL) {
1453 pqi = (struct smb_query_info __user *)arg;
1454 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1455 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1456 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
1457 if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
1458 sizeof(qi.input_buffer_length))) {
1459 rc = -EFAULT;
1460 goto iqinf_exit;
1461 }
1462 if (copy_to_user(pqi + 1, &io_rsp[1], qi.input_buffer_length)) {
1463 rc = -EFAULT;
1464 goto iqinf_exit;
1465 }
1466 } else {
1467 pqi = (struct smb_query_info __user *)arg;
1468 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1469 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1470 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
1471 if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
1472 sizeof(qi.input_buffer_length))) {
1473 rc = -EFAULT;
1474 goto iqinf_exit;
1475 }
1476 if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
1477 rc = -EFAULT;
1478 goto iqinf_exit;
1479 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001480 }
1481
1482 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001483 kfree(buffer);
1484 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001485 if (qi.flags & PASSTHRU_FSCTL)
1486 SMB2_ioctl_free(&rqst[1]);
1487 else
1488 SMB2_query_info_free(&rqst[1]);
1489
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001490 SMB2_close_free(&rqst[2]);
1491 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1492 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1493 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001494 return rc;
1495}
1496
Sachin Prabhu620d8742017-02-10 16:03:51 +05301497static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001498smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001499 struct cifsFileInfo *srcfile,
1500 struct cifsFileInfo *trgtfile, u64 src_off,
1501 u64 len, u64 dest_off)
1502{
1503 int rc;
1504 unsigned int ret_data_len;
1505 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001506 struct copychunk_ioctl_rsp *retbuf = NULL;
1507 struct cifs_tcon *tcon;
1508 int chunks_copied = 0;
1509 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301510 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001511
1512 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1513
1514 if (pcchunk == NULL)
1515 return -ENOMEM;
1516
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001517 cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
Steve French41c13582013-11-14 00:05:36 -06001518 /* Request a key from the server to identify the source of the copy */
1519 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1520 srcfile->fid.persistent_fid,
1521 srcfile->fid.volatile_fid, pcchunk);
1522
1523 /* Note: request_res_key sets res_key null only if rc !=0 */
1524 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001525 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001526
1527 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001528 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001529 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001530 pcchunk->Reserved2 = 0;
1531
Steve French9bf0c9c2013-11-16 18:05:28 -06001532 tcon = tlink_tcon(trgtfile->tlink);
1533
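	/*
	 * Copy the range server side in chunks of at most
	 * tcon->max_bytes_chunk bytes; if the server rejects our sizes with
	 * -EINVAL it reports its own limits and we retry once with those.
	 */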
1534 while (len > 0) {
1535 pcchunk->SourceOffset = cpu_to_le64(src_off);
1536 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1537 pcchunk->Length =
1538 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1539
1540 /* Request server copy to target from src identified by key */
1541 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001542 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001543 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001544 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1545 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001546 if (rc == 0) {
1547 if (ret_data_len !=
1548 sizeof(struct copychunk_ioctl_rsp)) {
1549 cifs_dbg(VFS, "invalid cchunk response size\n");
1550 rc = -EIO;
1551 goto cchunk_out;
1552 }
1553 if (retbuf->TotalBytesWritten == 0) {
1554 cifs_dbg(FYI, "no bytes copied\n");
1555 rc = -EIO;
1556 goto cchunk_out;
1557 }
1558 /*
1559 * Check if server claimed to write more than we asked
1560 */
1561 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1562 le32_to_cpu(pcchunk->Length)) {
1563 cifs_dbg(VFS, "invalid copy chunk response\n");
1564 rc = -EIO;
1565 goto cchunk_out;
1566 }
1567 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
1568 cifs_dbg(VFS, "invalid num chunks written\n");
1569 rc = -EIO;
1570 goto cchunk_out;
1571 }
1572 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001573
Sachin Prabhu620d8742017-02-10 16:03:51 +05301574 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1575 src_off += bytes_written;
1576 dest_off += bytes_written;
1577 len -= bytes_written;
1578 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001579
Sachin Prabhu620d8742017-02-10 16:03:51 +05301580 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001581 le32_to_cpu(retbuf->ChunksWritten),
1582 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301583 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001584 } else if (rc == -EINVAL) {
1585 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1586 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001587
Steve French9bf0c9c2013-11-16 18:05:28 -06001588 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1589 le32_to_cpu(retbuf->ChunksWritten),
1590 le32_to_cpu(retbuf->ChunkBytesWritten),
1591 le32_to_cpu(retbuf->TotalBytesWritten));
1592
1593 /*
1594 * Check if this is the first request using these sizes,
	1595			 * (i.e. check if the copy succeeded once with the original sizes
	1596			 * and check if the server gave us different sizes after
	1597			 * we already updated max sizes on previous request).
	1598			 * If not, why is the server returning an error now?
1599 */
1600 if ((chunks_copied != 0) || chunk_sizes_updated)
1601 goto cchunk_out;
1602
1603 /* Check that server is not asking us to grow size */
1604 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1605 tcon->max_bytes_chunk)
1606 tcon->max_bytes_chunk =
1607 le32_to_cpu(retbuf->ChunkBytesWritten);
1608 else
1609 goto cchunk_out; /* server gave us bogus size */
1610
1611 /* No need to change MaxChunks since already set to 1 */
1612 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001613 } else
1614 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001615 }
1616
1617cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001618 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001619 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301620 if (rc)
1621 return rc;
1622 else
1623 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001624}
1625
1626static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001627smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1628 struct cifs_fid *fid)
1629{
1630 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1631}
1632
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001633static unsigned int
1634smb2_read_data_offset(char *buf)
1635{
1636 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
1637 return rsp->DataOffset;
1638}
1639
1640static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001641smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001642{
1643 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001644
1645 if (in_remaining)
1646 return le32_to_cpu(rsp->DataRemaining);
1647
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001648 return le32_to_cpu(rsp->DataLength);
1649}
1650
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001651
1652static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001653smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001654 struct cifs_io_parms *parms, unsigned int *bytes_read,
1655 char **buf, int *buf_type)
1656{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001657 parms->persistent_fid = pfid->persistent_fid;
1658 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001659 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1660}
1661
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001662static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001663smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001664 struct cifs_io_parms *parms, unsigned int *written,
1665 struct kvec *iov, unsigned long nr_segs)
1666{
1667
Steve Frenchdb8b6312014-09-22 05:13:55 -05001668 parms->persistent_fid = pfid->persistent_fid;
1669 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001670 return SMB2_write(xid, parms, written, iov, nr_segs);
1671}
1672
Steve Frenchd43cc792014-08-13 17:16:29 -05001673/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1674static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1675 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1676{
1677 struct cifsInodeInfo *cifsi;
1678 int rc;
1679
1680 cifsi = CIFS_I(inode);
1681
1682 /* if file already sparse don't bother setting sparse again */
1683 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1684 return true; /* already sparse */
1685
1686 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1687 return true; /* already not sparse */
1688
1689 /*
1690 * Can't check for sparse support on share the usual way via the
1691 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1692 * since Samba server doesn't set the flag on the share, yet
1693 * supports the set sparse FSCTL and returns sparse correctly
1694 * in the file attributes. If we fail setting sparse though we
1695 * mark that server does not support sparse files for this share
1696 * to avoid repeatedly sending the unsupported fsctl to server
1697 * if the file is repeatedly extended.
1698 */
1699 if (tcon->broken_sparse_sup)
1700 return false;
1701
1702 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1703 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001704 true /* is_fctl */,
Steve French153322f2019-03-28 22:32:49 -05001705 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001706 if (rc) {
1707 tcon->broken_sparse_sup = true;
1708 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1709 return false;
1710 }
1711
1712 if (setsparse)
1713 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1714 else
1715 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1716
1717 return true;
1718}
1719
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001720static int
1721smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1722 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1723{
1724 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001725 struct inode *inode;
1726
1727 /*
1728 * If extending file more than one page make sparse. Many Linux fs
1729 * make files sparse by default when extending via ftruncate
1730 */
David Howells2b0143b2015-03-17 22:25:59 +00001731 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001732
1733 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001734 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001735
Steve Frenchd43cc792014-08-13 17:16:29 -05001736 /* whether set sparse succeeds or not, extend the file */
1737 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001738 }
1739
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001740 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001741 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001742}
1743
Steve French02b16662015-06-27 21:18:36 -07001744static int
1745smb2_duplicate_extents(const unsigned int xid,
1746 struct cifsFileInfo *srcfile,
1747 struct cifsFileInfo *trgtfile, u64 src_off,
1748 u64 len, u64 dest_off)
1749{
1750 int rc;
1751 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001752 struct duplicate_extents_to_file dup_ext_buf;
1753 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1754
1755 /* server fileays advertise duplicate extent support with this flag */
1756 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1757 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1758 return -EOPNOTSUPP;
1759
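	/*
	 * FSCTL_DUPLICATE_EXTENTS_TO_FILE asks the server to clone the
	 * allocated extents of the source handle into the target at the given
	 * offsets (a server side reflink, e.g. on ReFS), so no file data
	 * crosses the wire.
	 */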
1760 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1761 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1762 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1763 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1764 dup_ext_buf.ByteCount = cpu_to_le64(len);
	1765	cifs_dbg(FYI, "duplicate extents: src off %lld dst off %lld len %lld\n",
1766 src_off, dest_off, len);
1767
1768 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1769 if (rc)
1770 goto duplicate_extents_out;
1771
1772 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1773 trgtfile->fid.volatile_fid,
1774 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001775 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001776 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001777 sizeof(struct duplicate_extents_to_file),
Steve French153322f2019-03-28 22:32:49 -05001778 CIFSMaxBufSize, NULL,
Steve French02b16662015-06-27 21:18:36 -07001779 &ret_data_len);
1780
1781 if (ret_data_len > 0)
	1782		cifs_dbg(FYI, "non-zero response length in duplicate extents\n");
1783
1784duplicate_extents_out:
1785 return rc;
1786}
Steve French02b16662015-06-27 21:18:36 -07001787
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001788static int
Steve French64a5cfa2013-10-14 15:31:32 -05001789smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1790 struct cifsFileInfo *cfile)
1791{
1792 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1793 cfile->fid.volatile_fid);
1794}
1795
1796static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001797smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1798 struct cifsFileInfo *cfile)
1799{
1800 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001801 unsigned int ret_data_len;
1802
1803 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1804 integr_info.Flags = 0;
1805 integr_info.Reserved = 0;
1806
1807 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1808 cfile->fid.volatile_fid,
1809 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001810 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001811 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001812 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001813 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001814 &ret_data_len);
1815
1816}
1817
Steve Frenche02789a2018-08-09 14:33:12 -05001818/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1819#define GMT_TOKEN_SIZE 50
1820
Steve French153322f2019-03-28 22:32:49 -05001821#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1822
Steve Frenche02789a2018-08-09 14:33:12 -05001823/*
1824 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1825 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1826 */
Steve Frenchb3152e22015-06-24 03:17:02 -05001827static int
Steve French834170c2016-09-30 21:14:26 -05001828smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1829 struct cifsFileInfo *cfile, void __user *ioc_buf)
1830{
1831 char *retbuf = NULL;
1832 unsigned int ret_data_len = 0;
1833 int rc;
Steve French153322f2019-03-28 22:32:49 -05001834 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05001835 struct smb_snapshot_array snapshot_in;
1836
Steve French973189a2019-04-04 00:41:04 -05001837 /*
1838 * On the first query to enumerate the list of snapshots available
1839 * for this volume the buffer begins with 0 (number of snapshots
1840 * which can be returned is zero since at that point we do not know
1841 * how big the buffer needs to be). On the second query,
1842 * it (ret_data_len) is set to number of snapshots so we can
1843 * know to set the maximum response size larger (see below).
1844 */
Steve French153322f2019-03-28 22:32:49 -05001845 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
1846 return -EFAULT;
1847
1848 /*
	1849	 * Note that for snapshot queries, servers like Azure expect the
	1850	 * first query to be minimal size (and just used to get the
	1851	 * number/size of previous versions), so the response size must be
	1852	 * specified as EXACTLY sizeof(struct smb_snapshot_array), which is
	1853	 * 16 when rounded up to a multiple of eight bytes.
1854 */
1855 if (ret_data_len == 0)
1856 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
1857 else
1858 max_response_size = CIFSMaxBufSize;
1859
Steve French834170c2016-09-30 21:14:26 -05001860 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1861 cfile->fid.volatile_fid,
1862 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001863 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001864 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05001865 (char **)&retbuf,
1866 &ret_data_len);
	1867	cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
1868 rc, ret_data_len);
1869 if (rc)
1870 return rc;
1871
1872 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
1873 /* Fixup buffer */
1874 if (copy_from_user(&snapshot_in, ioc_buf,
1875 sizeof(struct smb_snapshot_array))) {
1876 rc = -EFAULT;
1877 kfree(retbuf);
1878 return rc;
1879 }
Steve French834170c2016-09-30 21:14:26 -05001880
Steve Frenche02789a2018-08-09 14:33:12 -05001881 /*
1882 * Check for min size, ie not large enough to fit even one GMT
1883 * token (snapshot). On the first ioctl some users may pass in
1884 * smaller size (or zero) to simply get the size of the array
1885 * so the user space caller can allocate sufficient memory
1886 * and retry the ioctl again with larger array size sufficient
1887 * to hold all of the snapshot GMT tokens on the second try.
1888 */
1889 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
1890 ret_data_len = sizeof(struct smb_snapshot_array);
1891
1892 /*
1893 * We return struct SRV_SNAPSHOT_ARRAY, followed by
1894 * the snapshot array (of 50 byte GMT tokens) each
1895 * representing an available previous version of the data
1896 */
1897 if (ret_data_len > (snapshot_in.snapshot_array_size +
1898 sizeof(struct smb_snapshot_array)))
1899 ret_data_len = snapshot_in.snapshot_array_size +
1900 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05001901
1902 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
1903 rc = -EFAULT;
1904 }
1905
1906 kfree(retbuf);
1907 return rc;
1908}
1909
1910static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001911smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1912 const char *path, struct cifs_sb_info *cifs_sb,
1913 struct cifs_fid *fid, __u16 search_flags,
1914 struct cifs_search_info *srch_inf)
1915{
1916 __le16 *utf16_path;
1917 int rc;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001918 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001919 struct cifs_open_parms oparms;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001920
1921 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1922 if (!utf16_path)
1923 return -ENOMEM;
1924
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001925 oparms.tcon = tcon;
1926 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
1927 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001928 if (backup_cred(cifs_sb))
1929 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1930 else
1931 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001932 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001933 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001934
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10001935 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001936 kfree(utf16_path);
1937 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001938 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001939 return rc;
1940 }
1941
1942 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02001943 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001944
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001945 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
1946 fid->volatile_fid, 0, srch_inf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001947 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07001948 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001949 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001950 }
1951 return rc;
1952}
1953
1954static int
1955smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
1956 struct cifs_fid *fid, __u16 search_flags,
1957 struct cifs_search_info *srch_inf)
1958{
1959 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
1960 fid->volatile_fid, 0, srch_inf);
1961}
1962
1963static int
1964smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
1965 struct cifs_fid *fid)
1966{
1967 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1968}
1969
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001970/*
	1971 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
	1972 * the number of credits and return true. Otherwise - return false.
	1973 */
1974static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08001975smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001976{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001977 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001978
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001979 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001980 return false;
1981
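	/*
	 * An interim STATUS_PENDING response may still grant credits; add
	 * them to the server's pool and wake up any waiting requests.
	 */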
Pavel Shilovsky66265f12019-01-23 17:11:16 -08001982 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001983 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07001984 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001985 spin_unlock(&server->req_lock);
1986 wake_up(&server->request_q);
1987 }
1988
1989 return true;
1990}
1991
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001992static bool
1993smb2_is_session_expired(char *buf)
1994{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10001995 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001996
Mark Symsd81243c2018-05-24 09:47:31 +01001997 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
1998 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07001999 return false;
2000
Steve Frenche68a9322018-07-30 14:23:58 -05002001 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2002 le16_to_cpu(shdr->Command),
2003 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002004 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002005
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002006 return true;
2007}
2008
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002009static int
2010smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2011 struct cifsInodeInfo *cinode)
2012{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002013 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2014 return SMB2_lease_break(0, tcon, cinode->lease_key,
2015 smb2_get_lease_state(cinode));
2016
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002017 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2018 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002019 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002020}
2021
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002022void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002023smb2_set_related(struct smb_rqst *rqst)
2024{
2025 struct smb2_sync_hdr *shdr;
2026
2027 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
2028 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2029}
2030
2031char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2032
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002033void
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002034smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002035{
2036 struct smb2_sync_hdr *shdr;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002037 struct cifs_ses *ses = tcon->ses;
2038 struct TCP_Server_Info *server = ses->server;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002039 unsigned long len = smb_rqst_len(server, rqst);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002040 int i, num_padding;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002041
2042 /* SMB headers in a compound are 8 byte aligned. */
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002043
2044 /* No padding needed */
2045 if (!(len & 7))
2046 goto finished;
2047
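	/*
	 * Example: a 68 byte request has (len & 7) == 4, so 4 bytes of
	 * padding place the next command at offset 72, a multiple of 8.
	 */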
2048 num_padding = 8 - (len & 7);
2049 if (!smb3_encryption_required(tcon)) {
2050 /*
2051 * If we do not have encryption then we can just add an extra
2052 * iov for the padding.
2053 */
2054 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2055 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2056 rqst->rq_nvec++;
2057 len += num_padding;
2058 } else {
2059 /*
2060 * We can not add a small padding iov for the encryption case
2061 * because the encryption framework can not handle the padding
2062 * iovs.
2063 * We have to flatten this into a single buffer and add
2064 * the padding to it.
2065 */
2066 for (i = 1; i < rqst->rq_nvec; i++) {
2067 memcpy(rqst->rq_iov[0].iov_base +
2068 rqst->rq_iov[0].iov_len,
2069 rqst->rq_iov[i].iov_base,
2070 rqst->rq_iov[i].iov_len);
2071 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06002072 }
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002073 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2074 0, num_padding);
2075 rqst->rq_iov[0].iov_len += num_padding;
2076 len += num_padding;
2077 rqst->rq_nvec = 1;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002078 }
2079
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002080 finished:
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002081 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
2082 shdr->NextCommand = cpu_to_le32(len);
2083}
2084
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002085/*
2086 * Passes the query info response back to the caller on success.
2087 * Caller need to free this with free_rsp_buf().
2088 */
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002089int
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002090smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2091 __le16 *utf16_path, u32 desired_access,
2092 u32 class, u32 type, u32 output_len,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002093 struct kvec *rsp, int *buftype,
2094 struct cifs_sb_info *cifs_sb)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002095{
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002096 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002097 int flags = 0;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002098 struct smb_rqst rqst[3];
2099 int resp_buftype[3];
2100 struct kvec rsp_iov[3];
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002101 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002102 struct kvec qi_iov[1];
2103 struct kvec close_iov[1];
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002104 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002105 struct cifs_open_parms oparms;
2106 struct cifs_fid fid;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002107 int rc;
2108
2109 if (smb3_encryption_required(tcon))
2110 flags |= CIFS_TRANSFORM_REQ;
2111
2112 memset(rqst, 0, sizeof(rqst));
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002113 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002114 memset(rsp_iov, 0, sizeof(rsp_iov));
2115
2116 memset(&open_iov, 0, sizeof(open_iov));
2117 rqst[0].rq_iov = open_iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002118 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002119
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002120 oparms.tcon = tcon;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002121 oparms.desired_access = desired_access;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002122 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002123 if (cifs_sb && backup_cred(cifs_sb))
2124 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2125 else
2126 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002127 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002128 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002129
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002130 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002131 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002132 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002133 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002134
2135 memset(&qi_iov, 0, sizeof(qi_iov));
2136 rqst[1].rq_iov = qi_iov;
2137 rqst[1].rq_nvec = 1;
2138
2139 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002140 class, type, 0,
2141 output_len, 0,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05002142 NULL);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002143 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002144 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002145 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002146 smb2_set_related(&rqst[1]);
2147
2148 memset(&close_iov, 0, sizeof(close_iov));
2149 rqst[2].rq_iov = close_iov;
2150 rqst[2].rq_nvec = 1;
2151
2152 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2153 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002154 goto qic_exit;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002155 smb2_set_related(&rqst[2]);
2156
2157 rc = compound_send_recv(xid, ses, flags, 3, rqst,
2158 resp_buftype, rsp_iov);
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002159 if (rc) {
2160 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002161 goto qic_exit;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002162 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002163 *rsp = rsp_iov[1];
2164 *buftype = resp_buftype[1];
2165
2166 qic_exit:
2167 SMB2_open_free(&rqst[0]);
2168 SMB2_query_info_free(&rqst[1]);
2169 SMB2_close_free(&rqst[2]);
2170 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2171 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2172 return rc;
2173}
2174
2175static int
2176smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2177 struct kstatfs *buf)
2178{
2179 struct smb2_query_info_rsp *rsp;
2180 struct smb2_fs_full_size_info *info = NULL;
2181 __le16 utf16_path = 0; /* Null - open root of share */
2182 struct kvec rsp_iov = {NULL, 0};
2183 int buftype = CIFS_NO_BUFFER;
2184 int rc;
2185
2186
2187 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2188 FILE_READ_ATTRIBUTES,
2189 FS_FULL_SIZE_INFORMATION,
2190 SMB2_O_INFO_FILESYSTEM,
2191 sizeof(struct smb2_fs_full_size_info),
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002192 &rsp_iov, &buftype, NULL);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002193 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002194 goto qfs_exit;
2195
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002196 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002197 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002198 info = (struct smb2_fs_full_size_info *)(
2199 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2200 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2201 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002202 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002203 sizeof(struct smb2_fs_full_size_info));
2204 if (!rc)
2205 smb2_copy_fs_info_to_kstatfs(info, buf);
2206
2207qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002208 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002209 return rc;
2210}
2211
Steve French2d304212018-06-24 23:28:12 -05002212static int
2213smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2214 struct kstatfs *buf)
2215{
2216 int rc;
2217 __le16 srch_path = 0; /* Null - open root of share */
2218 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2219 struct cifs_open_parms oparms;
2220 struct cifs_fid fid;
2221
2222 if (!tcon->posix_extensions)
2223 return smb2_queryfs(xid, tcon, buf);
2224
2225 oparms.tcon = tcon;
2226 oparms.desired_access = FILE_READ_ATTRIBUTES;
2227 oparms.disposition = FILE_OPEN;
2228 oparms.create_options = 0;
2229 oparms.fid = &fid;
2230 oparms.reconnect = false;
2231
2232 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2233 if (rc)
2234 return rc;
2235
2236 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2237 fid.volatile_fid, buf);
2238 buf->f_type = SMB2_MAGIC_NUMBER;
2239 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2240 return rc;
2241}
Steve French2d304212018-06-24 23:28:12 -05002242
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002243static bool
2244smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2245{
2246 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2247 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2248}
2249
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002250static int
2251smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2252 __u64 length, __u32 type, int lock, int unlock, bool wait)
2253{
2254 if (unlock && !lock)
2255 type = SMB2_LOCKFLAG_UNLOCK;
2256 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2257 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2258 current->tgid, length, offset, type, wait);
2259}
2260
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002261static void
2262smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2263{
2264 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2265}
2266
2267static void
2268smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2269{
2270 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2271}
2272
2273static void
2274smb2_new_lease_key(struct cifs_fid *fid)
2275{
Steve Frenchfa70b872016-09-22 00:39:34 -05002276 generate_random_uuid(fid->lease_key);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002277}
2278
Aurelien Aptel9d496402017-02-13 16:16:49 +01002279static int
2280smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2281 const char *search_name,
2282 struct dfs_info3_param **target_nodes,
2283 unsigned int *num_of_nodes,
2284 const struct nls_table *nls_codepage, int remap)
2285{
2286 int rc;
2287 __le16 *utf16_path = NULL;
2288 int utf16_path_len = 0;
2289 struct cifs_tcon *tcon;
2290 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2291 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2292 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2293
2294 cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name);
2295
2296 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002297	 * Try to use the IPC tcon, otherwise fall back to any tcon on this session
Aurelien Aptel9d496402017-02-13 16:16:49 +01002298 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002299 tcon = ses->tcon_ipc;
2300 if (tcon == NULL) {
2301 spin_lock(&cifs_tcp_ses_lock);
2302 tcon = list_first_entry_or_null(&ses->tcon_list,
2303 struct cifs_tcon,
2304 tcon_list);
2305 if (tcon)
2306 tcon->tc_count++;
2307 spin_unlock(&cifs_tcp_ses_lock);
2308 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002309
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002310 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002311 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2312 ses);
2313 rc = -ENOTCONN;
2314 goto out;
2315 }
2316
2317 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2318 &utf16_path_len,
2319 nls_codepage, remap);
2320 if (!utf16_path) {
2321 rc = -ENOMEM;
2322 goto out;
2323 }
2324
2325 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2326 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2327 if (!dfs_req) {
2328 rc = -ENOMEM;
2329 goto out;
2330 }
2331
2332 /* Highest DFS referral version understood */
2333 dfs_req->MaxReferralLevel = DFS_VERSION;
2334
2335 /* Path to resolve in an UTF-16 null-terminated string */
2336 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2337
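	/*
	 * Reissue the referral ioctl as long as it returns -EAGAIN
	 * (e.g. when the request has to be resent after a reconnect).
	 */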
2338 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002339 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2340 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002341 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002342 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002343 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002344 } while (rc == -EAGAIN);
2345
2346 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002347 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Aurelien Aptel57025912017-11-21 14:47:56 +01002348 cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002349 goto out;
2350 }
2351
2352 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2353 num_of_nodes, target_nodes,
2354 nls_codepage, remap, search_name,
2355 true /* is_unicode */);
2356 if (rc) {
2357 cifs_dbg(VFS, "parse error in smb2_get_dfs_refer rc=%d\n", rc);
2358 goto out;
2359 }
2360
2361 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002362 if (tcon && !tcon->ipc) {
2363 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002364 spin_lock(&cifs_tcp_ses_lock);
2365 tcon->tc_count--;
2366 spin_unlock(&cifs_tcp_ses_lock);
2367 }
2368 kfree(utf16_path);
2369 kfree(dfs_req);
2370 kfree(dfs_rsp);
2371 return rc;
2372}
Pavel Shilovsky78932422016-07-24 10:37:38 +03002373#define SMB2_SYMLINK_STRUCT_SIZE \
2374 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2375
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002376static int
2377smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2378 const char *full_path, char **target_path,
2379 struct cifs_sb_info *cifs_sb)
2380{
2381 int rc;
2382 __le16 *utf16_path;
2383 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2384 struct cifs_open_parms oparms;
2385 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002386 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002387 struct smb2_err_rsp *err_buf = NULL;
2388 int resp_buftype;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002389 struct smb2_symlink_err_rsp *symlink;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002390 unsigned int sub_len;
2391 unsigned int sub_offset;
2392 unsigned int print_len;
2393 unsigned int print_offset;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002394
2395 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2396
2397 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2398 if (!utf16_path)
2399 return -ENOMEM;
2400
2401 oparms.tcon = tcon;
2402 oparms.desired_access = FILE_READ_ATTRIBUTES;
2403 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05002404 if (backup_cred(cifs_sb))
2405 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2406 else
2407 oparms.create_options = 0;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002408 oparms.fid = &fid;
2409 oparms.reconnect = false;
2410
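	/*
	 * The open is expected to fail on a symlink with a symlink error
	 * response; the substitute (target) path is parsed out of that error
	 * payload below.
	 */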
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002411 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
2412 &resp_buftype);
Ronnie Sahlberge6d0fb72019-04-10 07:47:22 +10002413 if (!rc)
2414 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002415 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002416 rc = -ENOENT;
Dan Carpenterff361fd2018-06-19 15:25:30 +03002417 goto free_path;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002418 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002419
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002420 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002421 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002422 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002423 rc = -ENOENT;
2424 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002425 }
2426
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002427 /* open must fail on symlink - reset rc */
2428 rc = 0;
2429 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2430 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2431 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002432 print_len = le16_to_cpu(symlink->PrintNameLength);
2433 print_offset = le16_to_cpu(symlink->PrintNameOffset);
2434
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002435 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002436 rc = -ENOENT;
2437 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002438 }
2439
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002440 if (err_iov.iov_len <
2441 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002442 rc = -ENOENT;
2443 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002444 }
2445
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002446 *target_path = cifs_strndup_from_utf16(
2447 (char *)symlink->PathBuffer + sub_offset,
2448 sub_len, true, cifs_sb->local_nls);
2449 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002450 rc = -ENOMEM;
2451 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002452 }
2453 convert_delimiter(*target_path, '/');
2454 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002455
2456 querty_exit:
2457 free_rsp_buf(resp_buftype, err_buf);
Dan Carpenterff361fd2018-06-19 15:25:30 +03002458 free_path:
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002459 kfree(utf16_path);
2460 return rc;
2461}
2462
Arnd Bergmann84908422017-06-27 17:06:13 +02002463#ifdef CONFIG_CIFS_ACL
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002464static struct cifs_ntsd *
2465get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2466 const struct cifs_fid *cifsfid, u32 *pacllen)
2467{
2468 struct cifs_ntsd *pntsd = NULL;
2469 unsigned int xid;
2470 int rc = -EOPNOTSUPP;
2471 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2472
2473 if (IS_ERR(tlink))
2474 return ERR_CAST(tlink);
2475
2476 xid = get_xid();
2477 cifs_dbg(FYI, "trying to get acl\n");
2478
2479 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2480 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2481 free_xid(xid);
2482
2483 cifs_put_tlink(tlink);
2484
2485 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2486 if (rc)
2487 return ERR_PTR(rc);
2488 return pntsd;
2489
2490}
2491
2492static struct cifs_ntsd *
2493get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2494 const char *path, u32 *pacllen)
2495{
2496 struct cifs_ntsd *pntsd = NULL;
2497 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2498 unsigned int xid;
2499 int rc;
2500 struct cifs_tcon *tcon;
2501 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2502 struct cifs_fid fid;
2503 struct cifs_open_parms oparms;
2504 __le16 *utf16_path;
2505
2506 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2507 if (IS_ERR(tlink))
2508 return ERR_CAST(tlink);
2509
2510 tcon = tlink_tcon(tlink);
2511 xid = get_xid();
2512
2513 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002514 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002515 else
2516 oparms.create_options = 0;
2517
2518 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002519 if (!utf16_path) {
2520 rc = -ENOMEM;
2521 free_xid(xid);
2522 return ERR_PTR(rc);
2523 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002524
2525 oparms.tcon = tcon;
2526 oparms.desired_access = READ_CONTROL;
2527 oparms.disposition = FILE_OPEN;
2528 oparms.fid = &fid;
2529 oparms.reconnect = false;
2530
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002531 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002532 kfree(utf16_path);
2533 if (!rc) {
2534 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2535 fid.volatile_fid, (void **)&pntsd, pacllen);
2536 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2537 }
2538
2539 cifs_put_tlink(tlink);
2540 free_xid(xid);
2541
2542 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2543 if (rc)
2544 return ERR_PTR(rc);
2545 return pntsd;
2546}
2547
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002548#ifdef CONFIG_CIFS_ACL
2549static int
2550set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2551 struct inode *inode, const char *path, int aclflag)
2552{
2553 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2554 unsigned int xid;
2555 int rc, access_flags = 0;
2556 struct cifs_tcon *tcon;
2557 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2558 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2559 struct cifs_fid fid;
2560 struct cifs_open_parms oparms;
2561 __le16 *utf16_path;
2562
2563 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2564 if (IS_ERR(tlink))
2565 return PTR_ERR(tlink);
2566
2567 tcon = tlink_tcon(tlink);
2568 xid = get_xid();
2569
2570 if (backup_cred(cifs_sb))
2571 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2572 else
2573 oparms.create_options = 0;
2574
2575 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2576 access_flags = WRITE_OWNER;
2577 else
2578 access_flags = WRITE_DAC;
2579
2580 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002581 if (!utf16_path) {
2582 rc = -ENOMEM;
2583 free_xid(xid);
2584 return rc;
2585 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002586
2587 oparms.tcon = tcon;
2588 oparms.desired_access = access_flags;
2589 oparms.disposition = FILE_OPEN;
2590 oparms.path = path;
2591 oparms.fid = &fid;
2592 oparms.reconnect = false;
2593
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002594 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002595 kfree(utf16_path);
2596 if (!rc) {
2597 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2598 fid.volatile_fid, pnntsd, acllen, aclflag);
2599 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2600 }
2601
2602 cifs_put_tlink(tlink);
2603 free_xid(xid);
2604 return rc;
2605}
2606#endif /* CIFS_ACL */
2607
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002608/* Retrieve an ACL from the server */
2609static struct cifs_ntsd *
2610get_smb2_acl(struct cifs_sb_info *cifs_sb,
2611 struct inode *inode, const char *path,
2612 u32 *pacllen)
2613{
2614 struct cifs_ntsd *pntsd = NULL;
2615 struct cifsFileInfo *open_file = NULL;
2616
2617 if (inode)
2618 open_file = find_readable_file(CIFS_I(inode), true);
2619 if (!open_file)
2620 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2621
2622 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2623 cifsFileInfo_put(open_file);
2624 return pntsd;
2625}
Arnd Bergmann84908422017-06-27 17:06:13 +02002626#endif
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002627
Steve French30175622014-08-17 18:16:40 -05002628static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2629 loff_t offset, loff_t len, bool keep_size)
2630{
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002631 struct cifs_ses *ses = tcon->ses;
Steve French30175622014-08-17 18:16:40 -05002632 struct inode *inode;
2633 struct cifsInodeInfo *cifsi;
2634 struct cifsFileInfo *cfile = file->private_data;
2635 struct file_zero_data_information fsctl_buf;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002636 struct smb_rqst rqst[2];
2637 int resp_buftype[2];
2638 struct kvec rsp_iov[2];
2639 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
2640 struct kvec si_iov[1];
2641 unsigned int size[1];
2642 void *data[1];
Steve French30175622014-08-17 18:16:40 -05002643 long rc;
2644 unsigned int xid;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002645 int num = 0, flags = 0;
2646 __le64 eof;
Steve French30175622014-08-17 18:16:40 -05002647
2648 xid = get_xid();
2649
David Howells2b0143b2015-03-17 22:25:59 +00002650 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05002651 cifsi = CIFS_I(inode);
2652
Steve French779ede02019-03-13 01:41:49 -05002653 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
2654 ses->Suid, offset, len);
2655
2656
Steve French30175622014-08-17 18:16:40 -05002657 /* if file not oplocked can't be sure whether asking to extend size */
2658 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002659 if (keep_size == false) {
2660 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002661 trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
2662 tcon->tid, ses->Suid, offset, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002663 free_xid(xid);
2664 return rc;
2665 }
Steve French30175622014-08-17 18:16:40 -05002666
Steve French2bb93d22014-08-20 18:56:29 -05002667 /*
Steve French30175622014-08-17 18:16:40 -05002668 * Must check if file sparse since fallocate -z (zero range) assumes
2669 * non-sparse allocation
2670 */
Steve Frenchcfe89092018-05-19 02:04:55 -05002671 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
2672 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002673 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
2674 ses->Suid, offset, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002675 free_xid(xid);
2676 return rc;
2677 }
Steve French30175622014-08-17 18:16:40 -05002678
Steve French30175622014-08-17 18:16:40 -05002679	cifs_dbg(FYI, "offset %lld len %lld\n", offset, len);
2680
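	/*
	 * FSCTL_SET_ZERO_DATA zeroes the range [FileOffset, BeyondFinalZero);
	 * on a sparse file the server may deallocate the underlying extents.
	 */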
2681 fsctl_buf.FileOffset = cpu_to_le64(offset);
2682 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2683
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002684 if (smb3_encryption_required(tcon))
2685 flags |= CIFS_TRANSFORM_REQ;
2686
2687 memset(rqst, 0, sizeof(rqst));
2688 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
2689 memset(rsp_iov, 0, sizeof(rsp_iov));
2690
2691
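	/*
	 * Compound the FSCTL_SET_ZERO_DATA ioctl with an optional set-info of
	 * the end of file position (added below when the zeroed range extends
	 * the file) so both go out in a single round trip.
	 */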
2692 memset(&io_iov, 0, sizeof(io_iov));
2693 rqst[num].rq_iov = io_iov;
2694 rqst[num].rq_nvec = SMB2_IOCTL_IOV_SIZE;
2695 rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
2696 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
2697 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05002698 sizeof(struct file_zero_data_information),
2699 CIFSMaxBufSize);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002700 if (rc)
2701 goto zero_range_exit;
2702
2703 /*
2704 * do we also need to change the size of the file?
2705 */
2706 if (keep_size == false && i_size_read(inode) < offset + len) {
2707 smb2_set_next_command(tcon, &rqst[0]);
2708
2709 memset(&si_iov, 0, sizeof(si_iov));
2710 rqst[num].rq_iov = si_iov;
2711 rqst[num].rq_nvec = 1;
2712
2713 eof = cpu_to_le64(offset + len);
2714 size[0] = 8; /* sizeof __le64 */
2715 data[0] = &eof;
2716
2717 rc = SMB2_set_info_init(tcon, &rqst[num++],
2718 cfile->fid.persistent_fid,
	2719				cfile->fid.volatile_fid,
2720 current->tgid,
2721 FILE_END_OF_FILE_INFORMATION,
2722 SMB2_O_INFO_FILE, 0, data, size);
2723 smb2_set_related(&rqst[1]);
2724 }
2725
2726 rc = compound_send_recv(xid, ses, flags, num, rqst,
2727 resp_buftype, rsp_iov);
2728
2729 zero_range_exit:
2730 SMB2_ioctl_free(&rqst[0]);
2731 SMB2_set_info_free(&rqst[1]);
2732 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2733 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Steve French30175622014-08-17 18:16:40 -05002734 free_xid(xid);
Steve French779ede02019-03-13 01:41:49 -05002735 if (rc)
2736 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
2737 ses->Suid, offset, len, rc);
2738 else
2739 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
2740 ses->Suid, offset, len);
Steve French30175622014-08-17 18:16:40 -05002741 return rc;
2742}
2743
Steve French31742c52014-08-17 08:38:47 -05002744static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2745 loff_t offset, loff_t len)
2746{
2747 struct inode *inode;
2748 struct cifsInodeInfo *cifsi;
2749 struct cifsFileInfo *cfile = file->private_data;
2750 struct file_zero_data_information fsctl_buf;
2751 long rc;
2752 unsigned int xid;
2753 __u8 set_sparse = 1;
2754
2755 xid = get_xid();
2756
David Howells2b0143b2015-03-17 22:25:59 +00002757 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002758 cifsi = CIFS_I(inode);
2759
2760 /* Need to make file sparse, if not already, before freeing range. */
2761 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002762 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2763 rc = -EOPNOTSUPP;
2764 free_xid(xid);
2765 return rc;
2766 }
Steve French31742c52014-08-17 08:38:47 -05002767
2768 cifs_dbg(FYI, "offset %lld len %lld", offset, len);
2769
2770 fsctl_buf.FileOffset = cpu_to_le64(offset);
2771 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2772
2773 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2774 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002775 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05002776 sizeof(struct file_zero_data_information),
2777 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05002778 free_xid(xid);
2779 return rc;
2780}
2781
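/*
 * SMB3 has no direct "allocate range" operation, so plain fallocate is
 * emulated: clearing the sparse attribute forces the server to allocate the
 * whole file, and the end of file is pushed out when the requested range
 * extends it. Ranges that would force allocation far beyond the requested
 * region are rejected with -EOPNOTSUPP instead.
 */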
Steve French9ccf3212014-10-18 17:01:15 -05002782static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
2783 loff_t off, loff_t len, bool keep_size)
2784{
2785 struct inode *inode;
2786 struct cifsInodeInfo *cifsi;
2787 struct cifsFileInfo *cfile = file->private_data;
2788 long rc = -EOPNOTSUPP;
2789 unsigned int xid;
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10002790 __le64 eof;
Steve French9ccf3212014-10-18 17:01:15 -05002791
2792 xid = get_xid();
2793
David Howells2b0143b2015-03-17 22:25:59 +00002794 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05002795 cifsi = CIFS_I(inode);
2796
Steve French779ede02019-03-13 01:41:49 -05002797 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
2798 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05002799 /* if file not oplocked can't be sure whether asking to extend size */
2800 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002801 if (keep_size == false) {
Steve French779ede02019-03-13 01:41:49 -05002802 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
2803 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002804 free_xid(xid);
2805 return rc;
2806 }
Steve French9ccf3212014-10-18 17:01:15 -05002807
2808 /*
2809 * Files are non-sparse by default, so falloc may be a no-op.
2810 * Must check if the file is sparse. If not sparse, and not extending,
2811 * then there is no need to do anything since the file is already allocated.
2812 */
2813 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
2814 if (keep_size == true)
Steve Frenchcfe89092018-05-19 02:04:55 -05002815 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002816 /* check if extending file */
2817 else if (i_size_read(inode) >= off + len)
2818 /* not extending file and already not sparse */
Steve Frenchcfe89092018-05-19 02:04:55 -05002819 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05002820 /* BB: in future add else clause to extend file */
2821 else
Steve Frenchcfe89092018-05-19 02:04:55 -05002822 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002823 if (rc)
2824 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
2825 tcon->tid, tcon->ses->Suid, off, len, rc);
2826 else
2827 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
2828 tcon->tid, tcon->ses->Suid, off, len);
Steve Frenchcfe89092018-05-19 02:04:55 -05002829 free_xid(xid);
2830 return rc;
Steve French9ccf3212014-10-18 17:01:15 -05002831 }
2832
2833 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
2834 /*
2835 * Check if falloc starts within first few pages of file
2836 * and ends within a few pages of the end of file to
2837 * ensure that most of file is being forced to be
2838 * fallocated now. If so then setting the whole file non-sparse,
2839 * i.e. potentially allocating a few extra pages at the beginning
2840 * or end of the file via set_sparse, is harmless.
2841 */
Steve Frenchcfe89092018-05-19 02:04:55 -05002842 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
2843 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002844 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
2845 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002846 free_xid(xid);
2847 return rc;
2848 }
Steve French9ccf3212014-10-18 17:01:15 -05002849
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10002850 smb2_set_sparse(xid, tcon, cfile, inode, false);
2851 rc = 0;
2852 } else {
2853 smb2_set_sparse(xid, tcon, cfile, inode, false);
2854 rc = 0;
2855 if (i_size_read(inode) < off + len) {
2856 eof = cpu_to_le64(off + len);
2857 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2858 cfile->fid.volatile_fid, cfile->pid,
2859 &eof);
2860 }
Steve French9ccf3212014-10-18 17:01:15 -05002861 }
Steve French9ccf3212014-10-18 17:01:15 -05002862
Steve French779ede02019-03-13 01:41:49 -05002863 if (rc)
2864 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
2865 tcon->ses->Suid, off, len, rc);
2866 else
2867 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
2868 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05002869
2870 free_xid(xid);
2871 return rc;
2872}
2873
2874
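/*
 * fallocate() dispatcher for SMB3. Only hole punching, zero range (with or
 * without KEEP_SIZE) and plain preallocation are supported; any other mode
 * combination returns -EOPNOTSUPP. A caller punching a hole would use
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len),
 * since the VFS requires KEEP_SIZE together with PUNCH_HOLE.
 */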
Steve French31742c52014-08-17 08:38:47 -05002875static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
2876 loff_t off, loff_t len)
2877{
2878 /* KEEP_SIZE already checked for by do_fallocate */
2879 if (mode & FALLOC_FL_PUNCH_HOLE)
2880 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05002881 else if (mode & FALLOC_FL_ZERO_RANGE) {
2882 if (mode & FALLOC_FL_KEEP_SIZE)
2883 return smb3_zero_range(file, tcon, off, len, true);
2884 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05002885 } else if (mode == FALLOC_FL_KEEP_SIZE)
2886 return smb3_simple_falloc(file, tcon, off, len, true);
2887 else if (mode == 0)
2888 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05002889
2890 return -EOPNOTSUPP;
2891}
2892
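/*
 * Oplock break handling: when the server revokes caching rights the client
 * downgrades either to read (level II) caching or to no caching at all.
 * SMB2.0 expresses this with oplock levels, while SMB2.1 and later use
 * lease state bits, hence the two variants below.
 */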
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002893static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002894smb2_downgrade_oplock(struct TCP_Server_Info *server,
2895 struct cifsInodeInfo *cinode, bool set_level2)
2896{
2897 if (set_level2)
2898 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
2899 0, NULL);
2900 else
2901 server->ops->set_oplock_level(cinode, 0, 0, NULL);
2902}
2903
2904static void
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08002905smb21_downgrade_oplock(struct TCP_Server_Info *server,
2906 struct cifsInodeInfo *cinode, bool set_level2)
2907{
2908 server->ops->set_oplock_level(cinode,
2909 set_level2 ? SMB2_LEASE_READ_CACHING_HE :
2910 0, 0, NULL);
2911}
2912
2913static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002914smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2915 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002916{
2917 oplock &= 0xFF;
2918 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
2919 return;
2920 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002921 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002922 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
2923 &cinode->vfs_inode);
2924 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002925 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002926 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
2927 &cinode->vfs_inode);
2928 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
2929 cinode->oplock = CIFS_CACHE_READ_FLG;
2930 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
2931 &cinode->vfs_inode);
2932 } else
2933 cinode->oplock = 0;
2934}
2935
2936static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002937smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2938 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04002939{
2940 char message[5] = {0};
2941
2942 oplock &= 0xFF;
2943 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
2944 return;
2945
2946 cinode->oplock = 0;
2947 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
2948 cinode->oplock |= CIFS_CACHE_READ_FLG;
2949 strcat(message, "R");
2950 }
2951 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
2952 cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
2953 strcat(message, "H");
2954 }
2955 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
2956 cinode->oplock |= CIFS_CACHE_WRITE_FLG;
2957 strcat(message, "W");
2958 }
2959 if (!cinode->oplock)
2960 strcat(message, "None");
2961 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
2962 &cinode->vfs_inode);
2963}
2964
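/*
 * SMB3 leases carry an epoch which the server increments on each lease
 * state change. The checks below compare the previously cached state, the
 * newly granted state and the epoch delta to decide whether locally cached
 * pages may be stale and must be purged.
 */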
Pavel Shilovsky42873b02013-09-05 21:30:16 +04002965static void
2966smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2967 unsigned int epoch, bool *purge_cache)
2968{
2969 unsigned int old_oplock = cinode->oplock;
2970
2971 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
2972
2973 if (purge_cache) {
2974 *purge_cache = false;
2975 if (old_oplock == CIFS_CACHE_READ_FLG) {
2976 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
2977 (epoch - cinode->epoch > 0))
2978 *purge_cache = true;
2979 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
2980 (epoch - cinode->epoch > 1))
2981 *purge_cache = true;
2982 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
2983 (epoch - cinode->epoch > 1))
2984 *purge_cache = true;
2985 else if (cinode->oplock == 0 &&
2986 (epoch - cinode->epoch > 0))
2987 *purge_cache = true;
2988 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
2989 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
2990 (epoch - cinode->epoch > 0))
2991 *purge_cache = true;
2992 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
2993 (epoch - cinode->epoch > 1))
2994 *purge_cache = true;
2995 }
2996 cinode->epoch = epoch;
2997 }
2998}
2999
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003000static bool
3001smb2_is_read_op(__u32 oplock)
3002{
3003 return oplock == SMB2_OPLOCK_LEVEL_II;
3004}
3005
3006static bool
3007smb21_is_read_op(__u32 oplock)
3008{
3009 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3010 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3011}
3012
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003013static __le32
3014map_oplock_to_lease(u8 oplock)
3015{
3016 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3017 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3018 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3019 return SMB2_LEASE_READ_CACHING;
3020 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3021 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3022 SMB2_LEASE_WRITE_CACHING;
3023 return 0;
3024}
3025
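/*
 * Lease request create contexts, identified by the "RqLs" tag: the v1
 * format below is used by the SMB2/2.1 dialects, while the v2 format
 * (which adds an epoch among other fields) is used by the SMB3 dialects.
 */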
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003026static char *
3027smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3028{
3029 struct create_lease *buf;
3030
3031 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3032 if (!buf)
3033 return NULL;
3034
Stefano Brivio729c0c92018-07-05 15:10:02 +02003035 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003036 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003037
3038 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3039 (struct create_lease, lcontext));
3040 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3041 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3042 (struct create_lease, Name));
3043 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003044 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003045 buf->Name[0] = 'R';
3046 buf->Name[1] = 'q';
3047 buf->Name[2] = 'L';
3048 buf->Name[3] = 's';
3049 return (char *)buf;
3050}
3051
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003052static char *
3053smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3054{
3055 struct create_lease_v2 *buf;
3056
3057 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3058 if (!buf)
3059 return NULL;
3060
Stefano Brivio729c0c92018-07-05 15:10:02 +02003061 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003062 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3063
3064 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3065 (struct create_lease_v2, lcontext));
3066 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3067 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3068 (struct create_lease_v2, Name));
3069 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003070 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003071 buf->Name[0] = 'R';
3072 buf->Name[1] = 'q';
3073 buf->Name[2] = 'L';
3074 buf->Name[3] = 's';
3075 return (char *)buf;
3076}
3077
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003078static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003079smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003080{
3081 struct create_lease *lc = (struct create_lease *)buf;
3082
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003083 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003084 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3085 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3086 return le32_to_cpu(lc->lcontext.LeaseState);
3087}
3088
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003089static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003090smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003091{
3092 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3093
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003094 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003095 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3096 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003097 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003098 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003099 return le32_to_cpu(lc->lcontext.LeaseState);
3100}
3101
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003102static unsigned int
3103smb2_wp_retry_size(struct inode *inode)
3104{
3105 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3106 SMB2_MAX_BUFFER_SIZE);
3107}
3108
Pavel Shilovsky52755802014-08-18 20:49:57 +04003109static bool
3110smb2_dir_needs_close(struct cifsFileInfo *cfile)
3111{
3112 return !cfile->invalidHandle;
3113}
3114
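/*
 * Every encrypted PDU is prefixed by an SMB2 transform header: it records
 * the size of the original (unencrypted) message, a freshly generated
 * nonce, the "encrypted" flag (0x01) and the session id copied from the
 * plaintext SMB2 header, which lets the receiver find the right key.
 */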
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003115static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003116fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
3117 struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003118{
3119 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003120 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003121
3122 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3123 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3124 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3125 tr_hdr->Flags = cpu_to_le16(0x01);
3126 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
3127 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003128}
3129
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11003130/* We cannot use the normal sg_set_buf() as we will sometimes pass a
3131 * stack object as buf.
3132 */
3133static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
3134 unsigned int buflen)
3135{
3136 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
3137}
3138
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003139/* Assumes the first rqst has a transform header as the first iov.
3140 * I.e.
3141 * rqst[0].rq_iov[0] is transform header
3142 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
3143 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
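 *
 * The scatterlist built here has one entry per iov and per page of every
 * request, plus a trailing entry for the 16 byte signature; the first 20
 * bytes of the transform header are skipped since they are not part of
 * the encrypted blob.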
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003144 */
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003145static struct scatterlist *
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003146init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003147{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003148 unsigned int sg_len;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003149 struct scatterlist *sg;
3150 unsigned int i;
3151 unsigned int j;
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003152 unsigned int idx = 0;
3153 int skip;
3154
3155 sg_len = 1;
3156 for (i = 0; i < num_rqst; i++)
3157 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003158
3159 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
3160 if (!sg)
3161 return NULL;
3162
3163 sg_init_table(sg, sg_len);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003164 for (i = 0; i < num_rqst; i++) {
3165 for (j = 0; j < rqst[i].rq_nvec; j++) {
3166 /*
3167 * The first rqst has a transform header where the
3168 * first 20 bytes are not part of the encrypted blob
3169 */
3170 skip = (i == 0) && (j == 0) ? 20 : 0;
3171 smb2_sg_set_buf(&sg[idx++],
3172 rqst[i].rq_iov[j].iov_base + skip,
3173 rqst[i].rq_iov[j].iov_len - skip);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10003174 }
Steve Frenchd5f07fb2018-06-05 17:46:24 -05003175
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003176 for (j = 0; j < rqst[i].rq_npages; j++) {
3177 unsigned int len, offset;
3178
3179 rqst_page_get_length(&rqst[i], j, &len, &offset);
3180 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
3181 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003182 }
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003183 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003184 return sg;
3185}
3186
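/*
 * Look up the per-session encryption or decryption key (derived during
 * session setup) for the session id carried in the transform header.
 * A non-zero return means no matching session was found.
 */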
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003187static int
3188smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3189{
3190 struct cifs_ses *ses;
3191 u8 *ses_enc_key;
3192
3193 spin_lock(&cifs_tcp_ses_lock);
3194 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3195 if (ses->Suid != ses_id)
3196 continue;
3197 ses_enc_key = enc ? ses->smb3encryptionkey :
3198 ses->smb3decryptionkey;
3199 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3200 spin_unlock(&cifs_tcp_ses_lock);
3201 return 0;
3202 }
3203 spin_unlock(&cifs_tcp_ses_lock);
3204
3205 return 1;
3206}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003207/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003208 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3209 * iov[0] - transform header (associated data),
3210 * iov[1-N] - SMB2 header and pages - data to encrypt.
3211 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003212 * untouched.
3213 */
3214static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003215crypt_message(struct TCP_Server_Info *server, int num_rqst,
3216 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003217{
3218 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003219 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003220 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003221 int rc = 0;
3222 struct scatterlist *sg;
3223 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003224 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003225 struct aead_request *req;
3226 char *iv;
3227 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003228 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003229 struct crypto_aead *tfm;
3230 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3231
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003232 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3233 if (rc) {
3234 cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
3235 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003236 return 0;
3237 }
3238
3239 rc = smb3_crypto_aead_allocate(server);
3240 if (rc) {
3241 cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
3242 return rc;
3243 }
3244
3245 tfm = enc ? server->secmech.ccmaesencrypt :
3246 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003247 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003248 if (rc) {
3249 cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
3250 return rc;
3251 }
3252
3253 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3254 if (rc) {
3255 cifs_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
3256 return rc;
3257 }
3258
3259 req = aead_request_alloc(tfm, GFP_KERNEL);
3260 if (!req) {
3261 cifs_dbg(VFS, "%s: Failed to alloc aead request", __func__);
3262 return -ENOMEM;
3263 }
3264
3265 if (!enc) {
3266 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3267 crypt_len += SMB2_SIGNATURE_SIZE;
3268 }
3269
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003270 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003271 if (!sg) {
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003272 cifs_dbg(VFS, "%s: Failed to init sg", __func__);
3273 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003274 goto free_req;
3275 }
3276
3277 iv_len = crypto_aead_ivsize(tfm);
3278 iv = kzalloc(iv_len, GFP_KERNEL);
3279 if (!iv) {
3280 cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003281 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003282 goto free_sg;
3283 }
3284 iv[0] = 3;
3285 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CMM_NONCE);
3286
3287 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3288 aead_request_set_ad(req, assoc_data_len);
3289
3290 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003291 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003292
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003293 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3294 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003295
3296 if (!rc && enc)
3297 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3298
3299 kfree(iv);
3300free_sg:
3301 kfree(sg);
3302free_req:
3303 kfree(req);
3304 return rc;
3305}
3306
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003307void
3308smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003309{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003310 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003311
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003312 for (i = 0; i < num_rqst; i++) {
3313 if (rqst[i].rq_pages) {
3314 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3315 put_page(rqst[i].rq_pages[j]);
3316 kfree(rqst[i].rq_pages);
3317 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003318 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003319}
3320
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003321/*
3322 * This function will initialize new_rq and encrypt the content.
3323 * The first entry, new_rq[0], only contains a single iov which contains
3324 * a smb2_transform_hdr and is pre-allocated by the caller.
3325 * This function then populates new_rq[1+] with the content from old_rq[0+].
3326 *
3327 * The end result is an array of smb_rqst structures where the first structure
3328 * only contains a single iov for the transform header which we then can pass
3329 * to crypt_message().
3330 *
3331 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3332 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3333 */
3334static int
3335smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3336 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003337{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003338 struct page **pages;
3339 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3340 unsigned int npages;
3341 unsigned int orig_len = 0;
3342 int i, j;
3343 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003344
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003345 for (i = 1; i < num_rqst; i++) {
3346 npages = old_rq[i - 1].rq_npages;
3347 pages = kmalloc_array(npages, sizeof(struct page *),
3348 GFP_KERNEL);
3349 if (!pages)
3350 goto err_free;
3351
3352 new_rq[i].rq_pages = pages;
3353 new_rq[i].rq_npages = npages;
3354 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3355 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3356 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3357 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3358 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3359
3360 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3361
3362 for (j = 0; j < npages; j++) {
3363 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3364 if (!pages[j])
3365 goto err_free;
3366 }
3367
3368 /* copy pages from the old request */
3369 for (j = 0; j < npages; j++) {
3370 char *dst, *src;
3371 unsigned int offset, len;
3372
3373 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3374
3375 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3376 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3377
3378 memcpy(dst, src, len);
3379 kunmap(new_rq[i].rq_pages[j]);
3380 kunmap(old_rq[i - 1].rq_pages[j]);
3381 }
3382 }
3383
3384 /* fill the 1st iov with a transform header */
3385 fill_transform_hdr(tr_hdr, orig_len, old_rq);
3386
3387 rc = crypt_message(server, num_rqst, new_rq, 1);
3388 cifs_dbg(FYI, "encrypt message returned %d", rc);
3389 if (rc)
3390 goto err_free;
3391
3392 return rc;
3393
3394err_free:
3395 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3396 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003397}
3398
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003399static int
3400smb3_is_transform_hdr(void *buf)
3401{
3402 struct smb2_transform_hdr *trhdr = buf;
3403
3404 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3405}
3406
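/*
 * Decrypt an already received encrypted PDU in place: the buffer is split
 * into a two element iov (transform header plus encrypted body), combined
 * with any payload pages and run through crypt_message() in decrypt mode;
 * the decrypted SMB2 data is then slid over the transform header so that
 * callers see a normal response buffer.
 */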
3407static int
3408decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3409 unsigned int buf_data_size, struct page **pages,
3410 unsigned int npages, unsigned int page_data_size)
3411{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003412 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003413 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003414 int rc;
3415
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003416 iov[0].iov_base = buf;
3417 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3418 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3419 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003420
3421 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003422 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003423 rqst.rq_pages = pages;
3424 rqst.rq_npages = npages;
3425 rqst.rq_pagesz = PAGE_SIZE;
3426 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3427
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003428 rc = crypt_message(server, 1, &rqst, 0);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003429 cifs_dbg(FYI, "decrypt message returned %d\n", rc);
3430
3431 if (rc)
3432 return rc;
3433
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003434 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003435
3436 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003437
3438 return rc;
3439}
3440
3441static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003442read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3443 unsigned int npages, unsigned int len)
3444{
3445 int i;
3446 int length;
3447
3448 for (i = 0; i < npages; i++) {
3449 struct page *page = pages[i];
3450 size_t n;
3451
3452 n = len;
3453 if (len >= PAGE_SIZE) {
3454 /* enough data to fill the page */
3455 n = PAGE_SIZE;
3456 len -= n;
3457 } else {
3458 zero_user(page, len, PAGE_SIZE - len);
3459 len = 0;
3460 }
Long Li1dbe3462018-05-30 12:47:55 -07003461 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003462 if (length < 0)
3463 return length;
3464 server->total_read += length;
3465 }
3466
3467 return 0;
3468}
3469
3470static int
3471init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3472 unsigned int cur_off, struct bio_vec **page_vec)
3473{
3474 struct bio_vec *bvec;
3475 int i;
3476
3477 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3478 if (!bvec)
3479 return -ENOMEM;
3480
3481 for (i = 0; i < npages; i++) {
3482 bvec[i].bv_page = pages[i];
3483 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3484 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3485 data_size -= bvec[i].bv_len;
3486 }
3487
3488 if (data_size != 0) {
3489 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3490 kfree(bvec);
3491 return -EIO;
3492 }
3493
3494 *page_vec = bvec;
3495 return 0;
3496}
3497
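/*
 * Process a (possibly decrypted) read response: validate the data offset
 * and length, set up rdata->iov for credit handling, and copy the payload
 * into the caller's pages. The payload lives either entirely in the
 * response buffer or entirely in separately received pages, never split
 * across both.
 */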
3498static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003499handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3500 char *buf, unsigned int buf_len, struct page **pages,
3501 unsigned int npages, unsigned int page_data_size)
3502{
3503 unsigned int data_offset;
3504 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003505 unsigned int cur_off;
3506 unsigned int cur_page_idx;
3507 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003508 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10003509 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003510 struct bio_vec *bvec = NULL;
3511 struct iov_iter iter;
3512 struct kvec iov;
3513 int length;
Long Li74dcf412017-11-22 17:38:46 -07003514 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003515
3516 if (shdr->Command != SMB2_READ) {
3517 cifs_dbg(VFS, "only big read responses are supported\n");
3518 return -ENOTSUPP;
3519 }
3520
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003521 if (server->ops->is_session_expired &&
3522 server->ops->is_session_expired(buf)) {
3523 cifs_reconnect(server);
3524 wake_up(&server->response_q);
3525 return -1;
3526 }
3527
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003528 if (server->ops->is_status_pending &&
Pavel Shilovsky66265f12019-01-23 17:11:16 -08003529 server->ops->is_status_pending(buf, server))
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003530 return -1;
3531
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003532 /* set up first two iov to get credits */
3533 rdata->iov[0].iov_base = buf;
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003534 rdata->iov[0].iov_len = 0;
3535 rdata->iov[1].iov_base = buf;
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003536 rdata->iov[1].iov_len =
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003537 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003538 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3539 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3540 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3541 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3542
3543 rdata->result = server->ops->map_error(buf, true);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003544 if (rdata->result != 0) {
3545 cifs_dbg(FYI, "%s: server returned error %d\n",
3546 __func__, rdata->result);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003547 /* normal error on read response */
3548 dequeue_mid(mid, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003549 return 0;
3550 }
3551
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003552 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07003553#ifdef CONFIG_CIFS_SMB_DIRECT
3554 use_rdma_mr = rdata->mr;
3555#endif
3556 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003557
3558 if (data_offset < server->vals->read_rsp_size) {
3559 /*
3560 * win2k8 sometimes sends an offset of 0 when the read
3561 * is beyond the EOF. Treat it as if the data starts just after
3562 * the header.
3563 */
3564 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
3565 __func__, data_offset);
3566 data_offset = server->vals->read_rsp_size;
3567 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
3568 /* data_offset is beyond the end of smallbuf */
3569 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
3570 __func__, data_offset);
3571 rdata->result = -EIO;
3572 dequeue_mid(mid, rdata->result);
3573 return 0;
3574 }
3575
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003576 pad_len = data_offset - server->vals->read_rsp_size;
3577
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003578 if (buf_len <= data_offset) {
3579 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003580 cur_page_idx = pad_len / PAGE_SIZE;
3581 cur_off = pad_len % PAGE_SIZE;
3582
3583 if (cur_page_idx != 0) {
3584 /* data offset is beyond the 1st page of response */
3585 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
3586 __func__, data_offset);
3587 rdata->result = -EIO;
3588 dequeue_mid(mid, rdata->result);
3589 return 0;
3590 }
3591
3592 if (data_len > page_data_size - pad_len) {
3593 /* data_len is corrupt -- discard frame */
3594 rdata->result = -EIO;
3595 dequeue_mid(mid, rdata->result);
3596 return 0;
3597 }
3598
3599 rdata->result = init_read_bvec(pages, npages, page_data_size,
3600 cur_off, &bvec);
3601 if (rdata->result != 0) {
3602 dequeue_mid(mid, rdata->result);
3603 return 0;
3604 }
3605
David Howellsaa563d72018-10-20 00:57:56 +01003606 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003607 } else if (buf_len >= data_offset + data_len) {
3608 /* read response payload is in buf */
3609 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
3610 iov.iov_base = buf + data_offset;
3611 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01003612 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003613 } else {
3614 /* read response payload cannot be in both buf and pages */
3615 WARN_ONCE(1, "buf cannot contain only a part of read data");
3616 rdata->result = -EIO;
3617 dequeue_mid(mid, rdata->result);
3618 return 0;
3619 }
3620
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003621 length = rdata->copy_into_pages(server, rdata, &iter);
3622
3623 kfree(bvec);
3624
3625 if (length < 0)
3626 return length;
3627
3628 dequeue_mid(mid, false);
3629 return length;
3630}
3631
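/*
 * Large encrypted read responses do not fit in the preallocated buffers,
 * so everything past the read response header is received directly into
 * freshly allocated pages, the whole frame is decrypted, and the result
 * is handed to handle_read_data().
 */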
3632static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003633receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
3634{
3635 char *buf = server->smallbuf;
3636 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3637 unsigned int npages;
3638 struct page **pages;
3639 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003640 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003641 int rc;
3642 int i = 0;
3643
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003644 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003645 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
3646
3647 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
3648 if (rc < 0)
3649 return rc;
3650 server->total_read += rc;
3651
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003652 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11003653 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003654 npages = DIV_ROUND_UP(len, PAGE_SIZE);
3655
3656 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
3657 if (!pages) {
3658 rc = -ENOMEM;
3659 goto discard_data;
3660 }
3661
3662 for (; i < npages; i++) {
3663 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3664 if (!pages[i]) {
3665 rc = -ENOMEM;
3666 goto discard_data;
3667 }
3668 }
3669
3670 /* read the payload data into pages */
3671 rc = read_data_into_pages(server, pages, npages, len);
3672 if (rc)
3673 goto free_pages;
3674
Pavel Shilovsky350be252017-04-10 10:31:33 -07003675 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003676 if (rc)
3677 goto free_pages;
3678
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003679 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003680 pages, npages, len);
3681 if (rc)
3682 goto free_pages;
3683
3684 *mid = smb2_find_mid(server, buf);
3685 if (*mid == NULL)
3686 cifs_dbg(FYI, "mid not found\n");
3687 else {
3688 cifs_dbg(FYI, "mid found\n");
3689 (*mid)->decrypted = true;
3690 rc = handle_read_data(server, *mid, buf,
3691 server->vals->read_rsp_size,
3692 pages, npages, len);
3693 }
3694
3695free_pages:
3696 for (i = i - 1; i >= 0; i--)
3697 put_page(pages[i]);
3698 kfree(pages);
3699 return rc;
3700discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07003701 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003702 goto free_pages;
3703}
3704
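/*
 * Receive and decrypt an encrypted PDU that fits in a normal buffer, then
 * walk any compounded responses inside it by following NextCommand,
 * dispatching each one to its mid handler (at most MAX_COMPOUND responses
 * per frame).
 */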
3705static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003706receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003707 struct mid_q_entry **mids, char **bufs,
3708 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003709{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003710 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003711 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003712 char *tmpbuf;
3713 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003714 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003715 unsigned int buf_size;
3716 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003717 int next_is_large;
3718 char *next_buffer = NULL;
3719
3720 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003721
3722 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003723 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003724 server->large_buf = true;
3725 memcpy(server->bigbuf, buf, server->total_read);
3726 buf = server->bigbuf;
3727 }
3728
3729 /* now read the rest */
3730 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003731 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003732 if (length < 0)
3733 return length;
3734 server->total_read += length;
3735
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003736 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003737 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
3738 if (length)
3739 return length;
3740
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003741 next_is_large = server->large_buf;
3742 one_more:
3743 shdr = (struct smb2_sync_hdr *)buf;
3744 if (shdr->NextCommand) {
3745 if (next_is_large) {
3746 tmpbuf = server->bigbuf;
3747 next_buffer = (char *)cifs_buf_get();
3748 } else {
3749 tmpbuf = server->smallbuf;
3750 next_buffer = (char *)cifs_small_buf_get();
3751 }
3752 memcpy(next_buffer,
3753 tmpbuf + le32_to_cpu(shdr->NextCommand),
3754 pdu_length - le32_to_cpu(shdr->NextCommand));
3755 }
3756
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003757 mid_entry = smb2_find_mid(server, buf);
3758 if (mid_entry == NULL)
3759 cifs_dbg(FYI, "mid not found\n");
3760 else {
3761 cifs_dbg(FYI, "mid found\n");
3762 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003763 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003764 }
3765
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003766 if (*num_mids >= MAX_COMPOUND) {
3767 cifs_dbg(VFS, "too many PDUs in compound\n");
3768 return -1;
3769 }
3770 bufs[*num_mids] = buf;
3771 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003772
3773 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003774 ret = mid_entry->handle(server, mid_entry);
3775 else
3776 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003777
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003778 if (ret == 0 && shdr->NextCommand) {
3779 pdu_length -= le32_to_cpu(shdr->NextCommand);
3780 server->large_buf = next_is_large;
3781 if (next_is_large)
3782 server->bigbuf = next_buffer;
3783 else
3784 server->smallbuf = next_buffer;
3785
3786 buf += le32_to_cpu(shdr->NextCommand);
3787 goto one_more;
3788 }
3789
3790 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003791}
3792
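/*
 * Entry point for received encrypted traffic: after sanity checking the
 * transform header lengths, frames too large for the standard buffers are
 * routed to the encrypted read path and everything else to the standard
 * path above.
 */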
3793static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003794smb3_receive_transform(struct TCP_Server_Info *server,
3795 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003796{
3797 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10003798 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003799 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3800 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3801
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003802 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003803 sizeof(struct smb2_sync_hdr)) {
3804 cifs_dbg(VFS, "Transform message is too small (%u)\n",
3805 pdu_length);
3806 cifs_reconnect(server);
3807 wake_up(&server->response_q);
3808 return -ECONNABORTED;
3809 }
3810
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003811 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003812 cifs_dbg(VFS, "Transform message is broken\n");
3813 cifs_reconnect(server);
3814 wake_up(&server->response_q);
3815 return -ECONNABORTED;
3816 }
3817
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003818 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08003819 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
3820 *num_mids = 1;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003821 return receive_encrypted_read(server, &mids[0]);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08003822 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003823
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10003824 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003825}
3826
3827int
3828smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
3829{
3830 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
3831
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003832 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003833 NULL, 0, 0);
3834}
3835
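/*
 * Return the offset of the next PDU in the receive buffer: for encrypted
 * frames this is the full transform header plus the original message size,
 * otherwise the NextCommand offset from the SMB2 header (zero when this is
 * the last PDU of a compound).
 */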
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10003836static int
3837smb2_next_header(char *buf)
3838{
3839 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
3840 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
3841
3842 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
3843 return sizeof(struct smb2_transform_hdr) +
3844 le32_to_cpu(t_hdr->OriginalMessageSize);
3845
3846 return le32_to_cpu(hdr->NextCommand);
3847}
3848
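/*
 * Create a block or character device node using the SFU (Services for
 * Unix) convention: the node is stored as an ordinary file whose first
 * bytes carry an "IntxCHR" or "IntxBLK" tag followed by the major and
 * minor numbers. Only used when mounted with the 'sfu' option.
 */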
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05003849static int
3850smb2_make_node(unsigned int xid, struct inode *inode,
3851 struct dentry *dentry, struct cifs_tcon *tcon,
3852 char *full_path, umode_t mode, dev_t dev)
3853{
3854 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3855 int rc = -EPERM;
3856 int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
3857 FILE_ALL_INFO *buf = NULL;
3858 struct cifs_io_parms io_parms;
3859 __u32 oplock = 0;
3860 struct cifs_fid fid;
3861 struct cifs_open_parms oparms;
3862 unsigned int bytes_written;
3863 struct win_dev *pdev;
3864 struct kvec iov[2];
3865
3866 /*
3867 * Check if mounted with the 'sfu' mount option.
3868 * SFU emulation should work with all servers, but only
3869 * supports block and char devices (not sockets or fifos),
3870 * and was used by default in earlier versions of Windows
3871 */
3872 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
3873 goto out;
3874
3875 /*
3876 * TODO: Add ability to create instead via reparse point. Windows (e.g.
3877 * their current NFS server) uses this approach to expose special files
3878 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
3879 */
3880
3881 if (!S_ISCHR(mode) && !S_ISBLK(mode))
3882 goto out;
3883
3884 cifs_dbg(FYI, "sfu compat create special file\n");
3885
3886 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
3887 if (buf == NULL) {
3888 rc = -ENOMEM;
3889 goto out;
3890 }
3891
3892 if (backup_cred(cifs_sb))
3893 create_options |= CREATE_OPEN_BACKUP_INTENT;
3894
3895 oparms.tcon = tcon;
3896 oparms.cifs_sb = cifs_sb;
3897 oparms.desired_access = GENERIC_WRITE;
3898 oparms.create_options = create_options;
3899 oparms.disposition = FILE_CREATE;
3900 oparms.path = full_path;
3901 oparms.fid = &fid;
3902 oparms.reconnect = false;
3903
3904 if (tcon->ses->server->oplocks)
3905 oplock = REQ_OPLOCK;
3906 else
3907 oplock = 0;
3908 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
3909 if (rc)
3910 goto out;
3911
3912 /*
3913 * BB Do not bother to decode buf since no local inode yet to put
3914 * timestamps in, but we can reuse it safely.
3915 */
3916
3917 pdev = (struct win_dev *)buf;
3918 io_parms.pid = current->tgid;
3919 io_parms.tcon = tcon;
3920 io_parms.offset = 0;
3921 io_parms.length = sizeof(struct win_dev);
3922 iov[1].iov_base = buf;
3923 iov[1].iov_len = sizeof(struct win_dev);
3924 if (S_ISCHR(mode)) {
3925 memcpy(pdev->type, "IntxCHR", 8);
3926 pdev->major = cpu_to_le64(MAJOR(dev));
3927 pdev->minor = cpu_to_le64(MINOR(dev));
3928 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
3929 &bytes_written, iov, 1);
3930 } else if (S_ISBLK(mode)) {
3931 memcpy(pdev->type, "IntxBLK", 8);
3932 pdev->major = cpu_to_le64(MAJOR(dev));
3933 pdev->minor = cpu_to_le64(MINOR(dev));
3934 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
3935 &bytes_written, iov, 1);
3936 }
3937 tcon->ses->server->ops->close(xid, tcon, &fid);
3938 d_drop(dentry);
3939
3940 /* FIXME: add code here to set EAs */
3941out:
3942 kfree(buf);
3943 return rc;
3944}
3945
3946
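/*
 * Dialect specific operation tables. They share most of the SMB2 helpers;
 * the differences are mainly in oplock/lease handling, credit management
 * and, for the SMB3 dialects, features such as encryption.
 */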
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003947struct smb_version_operations smb20_operations = {
3948 .compare_fids = smb2_compare_fids,
3949 .setup_request = smb2_setup_request,
3950 .setup_async_request = smb2_setup_async_request,
3951 .check_receive = smb2_check_receive,
3952 .add_credits = smb2_add_credits,
3953 .set_credits = smb2_set_credits,
3954 .get_credits_field = smb2_get_credits_field,
3955 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003956 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003957 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08003958 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003959 .read_data_offset = smb2_read_data_offset,
3960 .read_data_length = smb2_read_data_length,
3961 .map_error = map_smb2_to_linux_error,
3962 .find_mid = smb2_find_mid,
3963 .check_message = smb2_check_message,
3964 .dump_detail = smb2_dump_detail,
3965 .clear_stats = smb2_clear_stats,
3966 .print_stats = smb2_print_stats,
3967 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08003968 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003969 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003970 .need_neg = smb2_need_neg,
3971 .negotiate = smb2_negotiate,
3972 .negotiate_wsize = smb2_negotiate_wsize,
3973 .negotiate_rsize = smb2_negotiate_rsize,
3974 .sess_setup = SMB2_sess_setup,
3975 .logoff = SMB2_logoff,
3976 .tree_connect = SMB2_tcon,
3977 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05003978 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003979 .is_path_accessible = smb2_is_path_accessible,
3980 .can_echo = smb2_can_echo,
3981 .echo = SMB2_echo,
3982 .query_path_info = smb2_query_path_info,
3983 .get_srv_inum = smb2_get_srv_inum,
3984 .query_file_info = smb2_query_file_info,
3985 .set_path_size = smb2_set_path_size,
3986 .set_file_size = smb2_set_file_size,
3987 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05003988 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003989 .mkdir = smb2_mkdir,
3990 .mkdir_setinfo = smb2_mkdir_setinfo,
3991 .rmdir = smb2_rmdir,
3992 .unlink = smb2_unlink,
3993 .rename = smb2_rename_path,
3994 .create_hardlink = smb2_create_hardlink,
3995 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01003996 .query_mf_symlink = smb3_query_mf_symlink,
3997 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003998 .open = smb2_open_file,
3999 .set_fid = smb2_set_fid,
4000 .close = smb2_close_file,
4001 .flush = smb2_flush_file,
4002 .async_readv = smb2_async_readv,
4003 .async_writev = smb2_async_writev,
4004 .sync_read = smb2_sync_read,
4005 .sync_write = smb2_sync_write,
4006 .query_dir_first = smb2_query_dir_first,
4007 .query_dir_next = smb2_query_dir_next,
4008 .close_dir = smb2_close_dir,
4009 .calc_smb_size = smb2_calc_size,
4010 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004011 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004012 .oplock_response = smb2_oplock_response,
4013 .queryfs = smb2_queryfs,
4014 .mand_lock = smb2_mand_lock,
4015 .mand_unlock_range = smb2_unlock_range,
4016 .push_mand_locks = smb2_push_mandatory_locks,
4017 .get_lease_key = smb2_get_lease_key,
4018 .set_lease_key = smb2_set_lease_key,
4019 .new_lease_key = smb2_new_lease_key,
4020 .calc_signature = smb2_calc_signature,
4021 .is_read_op = smb2_is_read_op,
4022 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004023 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004024 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004025 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004026 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004027 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004028 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304029 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004030#ifdef CONFIG_CIFS_XATTR
4031 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004032 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004033#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004034#ifdef CONFIG_CIFS_ACL
4035 .get_acl = get_smb2_acl,
4036 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004037 .set_acl = set_smb2_acl,
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004038#endif /* CIFS_ACL */
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004039 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004040 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004041 .make_node = smb2_make_node,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004042};
4043
Steve French1080ef72011-02-24 18:07:19 +00004044struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004045 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004046 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04004047 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004048 .check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb21_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.enum_snapshots = smb3_enum_snapshots,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
#ifdef CONFIG_CIFS_ACL
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
};

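/*
 * Dispatch table for the SMB 3.0 dialect.  It shares most handlers with
 * the SMB 2.1 table above but switches to the SMB3 signing key derivation
 * (generate_smb30signingkey) and AES-CMAC signatures (smb3_calc_signature),
 * and adds encryption transform handling, version 2 lease buffers,
 * validate negotiate, duplicate extents and fallocate support.
 */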
struct smb_version_operations smb30_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb30signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
#ifdef CONFIG_CIFS_ACL
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
};

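/*
 * Dispatch table for the SMB 3.1.1 dialect.  Largely the SMB 3.0 set, but
 * signing keys are derived using the pre-auth integrity hash
 * (generate_smb311signingkey), POSIX mkdir and the SMB 3.1.1 queryfs
 * variant are wired in, and validate negotiate is not used because the
 * negotiate exchange is protected by pre-auth integrity instead.
 */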
struct smb_version_operations smb311_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.posix_mkdir = smb311_posix_mkdir,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb311_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb311signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
#ifdef CONFIG_CIFS_ACL
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
};

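/*
 * Per-dialect protocol constants: dialect string and ID, capabilities
 * requested on negotiate, lock flags, header and read response sizes,
 * signing flags and lease context size.  One table follows for each
 * dialect selectable with the vers= mount option, plus the multi-dialect
 * smb3any and smbdefault tables.
 */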
struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};

struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};

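/*
 * Multi-dialect negotiation: for the next two tables the client sends an
 * array of dialects and lets the server choose, so the protocol_id below
 * is only a placeholder.  smb3any limits the offer to SMB3 dialects,
 * while smbdefault is used when no vers= mount option is given.
 */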
struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

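/* Single-dialect tables for SMB 3.0, 3.0.2 and 3.1.1 follow. */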
struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};