blob: 19589922ef2b31b7fa9f534814dfd8fc2a58d205 [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070013#include <crypto/aead.h>
Steve French1080ef72011-02-24 18:07:19 +000014#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040015#include "smb2pdu.h"
16#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040017#include "cifsproto.h"
18#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040019#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070020#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070021#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050022#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070023#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040024
Pavel Shilovskyef68e832019-01-18 17:25:36 -080025/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040026static int
27change_conf(struct TCP_Server_Info *server)
28{
29 server->credits += server->echo_credits + server->oplock_credits;
30 server->oplock_credits = server->echo_credits = 0;
31 switch (server->credits) {
32 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080033 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040034 case 1:
35 server->echoes = false;
36 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040037 break;
38 case 2:
39 server->echoes = true;
40 server->oplocks = false;
41 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040042 break;
43 default:
44 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050045 if (enable_oplocks) {
46 server->oplocks = true;
47 server->oplock_credits = 1;
48 } else
49 server->oplocks = false;
50
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040051 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040052 }
53 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080054 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040055}
56
/*
 * Return @credits to the server's credit pool (selected by @optype) and
 * rebalance it.  Credits obtained under an older reconnect instance are
 * dropped rather than added back, since they belong to a dead connection.
 */
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;	/* -1 means change_conf() was not run */
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	/* only put credits back if they were issued by this connection */
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	/* logging below is done outside the spinlock on purpose */
	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			add, instance);

	/* no point reporting credit state for a dying connection */
	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}
124
125static void
126smb2_set_credits(struct TCP_Server_Info *server, const int val)
127{
128 spin_lock(&server->req_lock);
129 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500130 if (val == 1)
131 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400132 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500133 /* don't log while holding the lock */
134 if (val == 1)
135 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400136}
137
138static int *
139smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
140{
141 switch (optype) {
142 case CIFS_ECHO_OP:
143 return &server->echo_credits;
144 case CIFS_OBREAK_OP:
145 return &server->oplock_credits;
146 default:
147 return &server->credits;
148 }
149}
150
151static unsigned int
152smb2_get_credits(struct mid_q_entry *mid)
153{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000154 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700155
Pavel Shilovsky3d3003f2019-01-22 16:50:21 -0800156 if (mid->mid_state == MID_RESPONSE_RECEIVED
157 || mid->mid_state == MID_RESPONSE_MALFORMED)
158 return le16_to_cpu(shdr->CreditRequest);
159
160 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400161}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400162
/*
 * Wait until enough credits are available for a large (MTU) read/write of
 * up to @size bytes.  On success *num is set to the I/O size to use and
 * @credits records how many credits were consumed (and under which
 * reconnect instance), so they can be returned later.
 * Returns 0, -ERESTARTSYS if the wait was interrupted, or -ENOENT if the
 * connection is being torn down.
 */
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			/* drop the lock before sleeping for credits */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				/*
				 * too few credits to carve any off: fall back
				 * to a single-credit sized I/O and consume
				 * nothing from the pool (value/instance 0)
				 */
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}
212
/*
 * Shrink a previously reserved credit allotment to what @payload_size
 * actually needs, returning the surplus to the server's pool.
 * Fails with -EAGAIN if the credits were issued by an older (pre-reconnect)
 * instance of the connection, and -ENOTSUPP if the request somehow holds
 * fewer credits than the payload requires (should never happen).
 */
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

	/* nothing reserved, or reservation already exact: no-op */
	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		WARN_ONCE(1, "request has less credits (%d) than required (%d)",
			  credits->value, new_val);
		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		/* the connection these credits came from is gone */
		spin_unlock(&server->req_lock);
		cifs_dbg(VFS, "trying to return %d credits to old session\n",
			 credits->value - new_val);
		return -EAGAIN;
	}

	server->credits += credits->value - new_val;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	credits->value = new_val;
	return 0;
}
244
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400245static __u64
246smb2_get_next_mid(struct TCP_Server_Info *server)
247{
248 __u64 mid;
249 /* for SMB2 we need the current value */
250 spin_lock(&GlobalMid_Lock);
251 mid = server->CurrentMid++;
252 spin_unlock(&GlobalMid_Lock);
253 return mid;
254}
Steve French1080ef72011-02-24 18:07:19 +0000255
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800256static void
257smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
258{
259 spin_lock(&GlobalMid_Lock);
260 if (server->CurrentMid >= val)
261 server->CurrentMid -= val;
262 spin_unlock(&GlobalMid_Lock);
263}
264
/*
 * Find the pending mid entry matching the response in @buf.
 * On a match takes a reference on the mid (caller must drop it) and
 * returns it; returns NULL if no match or if the frame is encrypted
 * (transform headers are handled elsewhere).
 */
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		/* match on mid, state and command to avoid stale entries */
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			/* pin the mid while the caller processes it */
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}
290
/* Debug helper: dump the SMB2 header fields of @buf (DEBUG2 builds only). */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;

	cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 hdr->Command, hdr->Status, hdr->Flags, hdr->MessageId,
		 hdr->ProcessId);
	cifs_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}
304
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400305static bool
306smb2_need_neg(struct TCP_Server_Info *server)
307{
308 return server->max_read == 0;
309}
310
311static int
312smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
313{
314 int rc;
Christoph Probsta205d502019-05-08 21:36:25 +0200315
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400316 ses->server->CurrentMid = 0;
317 rc = SMB2_negotiate(xid, ses);
318 /* BB we probably don't need to retry with modern servers */
319 if (rc == -EAGAIN)
320 rc = -EHOSTDOWN;
321 return rc;
322}
323
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700324static unsigned int
325smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
326{
327 struct TCP_Server_Info *server = tcon->ses->server;
328 unsigned int wsize;
329
330 /* start with specified wsize, or default */
331 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
332 wsize = min_t(unsigned int, wsize, server->max_write);
Long Li09902f82017-11-22 17:38:39 -0700333#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700334 if (server->rdma) {
335 if (server->sign)
336 wsize = min_t(unsigned int,
337 wsize, server->smbd_conn->max_fragmented_send_size);
338 else
339 wsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700340 wsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700341 }
Long Li09902f82017-11-22 17:38:39 -0700342#endif
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400343 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
344 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700345
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700346 return wsize;
347}
348
349static unsigned int
Steve French3d621232018-09-25 15:33:47 -0500350smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
351{
352 struct TCP_Server_Info *server = tcon->ses->server;
353 unsigned int wsize;
354
355 /* start with specified wsize, or default */
356 wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
357 wsize = min_t(unsigned int, wsize, server->max_write);
358#ifdef CONFIG_CIFS_SMB_DIRECT
359 if (server->rdma) {
360 if (server->sign)
361 wsize = min_t(unsigned int,
362 wsize, server->smbd_conn->max_fragmented_send_size);
363 else
364 wsize = min_t(unsigned int,
365 wsize, server->smbd_conn->max_readwrite_size);
366 }
367#endif
368 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
369 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
370
371 return wsize;
372}
373
374static unsigned int
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700375smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
376{
377 struct TCP_Server_Info *server = tcon->ses->server;
378 unsigned int rsize;
379
380 /* start with specified rsize, or default */
381 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
382 rsize = min_t(unsigned int, rsize, server->max_read);
Long Li09902f82017-11-22 17:38:39 -0700383#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700384 if (server->rdma) {
385 if (server->sign)
386 rsize = min_t(unsigned int,
387 rsize, server->smbd_conn->max_fragmented_recv_size);
388 else
389 rsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700390 rsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700391 }
Long Li09902f82017-11-22 17:38:39 -0700392#endif
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400393
394 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
395 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700396
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700397 return rsize;
398}
399
Steve French3d621232018-09-25 15:33:47 -0500400static unsigned int
401smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
402{
403 struct TCP_Server_Info *server = tcon->ses->server;
404 unsigned int rsize;
405
406 /* start with specified rsize, or default */
407 rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
408 rsize = min_t(unsigned int, rsize, server->max_read);
409#ifdef CONFIG_CIFS_SMB_DIRECT
410 if (server->rdma) {
411 if (server->sign)
412 rsize = min_t(unsigned int,
413 rsize, server->smbd_conn->max_fragmented_recv_size);
414 else
415 rsize = min_t(unsigned int,
416 rsize, server->smbd_conn->max_readwrite_size);
417 }
418#endif
419
420 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
421 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
422
423 return rsize;
424}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200425
426static int
427parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
428 size_t buf_len,
429 struct cifs_server_iface **iface_list,
430 size_t *iface_count)
431{
432 struct network_interface_info_ioctl_rsp *p;
433 struct sockaddr_in *addr4;
434 struct sockaddr_in6 *addr6;
435 struct iface_info_ipv4 *p4;
436 struct iface_info_ipv6 *p6;
437 struct cifs_server_iface *info;
438 ssize_t bytes_left;
439 size_t next = 0;
440 int nb_iface = 0;
441 int rc = 0;
442
443 *iface_list = NULL;
444 *iface_count = 0;
445
446 /*
447 * Fist pass: count and sanity check
448 */
449
450 bytes_left = buf_len;
451 p = buf;
452 while (bytes_left >= sizeof(*p)) {
453 nb_iface++;
454 next = le32_to_cpu(p->Next);
455 if (!next) {
456 bytes_left -= sizeof(*p);
457 break;
458 }
459 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
460 bytes_left -= next;
461 }
462
463 if (!nb_iface) {
464 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
465 rc = -EINVAL;
466 goto out;
467 }
468
469 if (bytes_left || p->Next)
470 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
471
472
473 /*
474 * Second pass: extract info to internal structure
475 */
476
477 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
478 if (!*iface_list) {
479 rc = -ENOMEM;
480 goto out;
481 }
482
483 info = *iface_list;
484 bytes_left = buf_len;
485 p = buf;
486 while (bytes_left >= sizeof(*p)) {
487 info->speed = le64_to_cpu(p->LinkSpeed);
488 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
489 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
490
491 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
492 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
493 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
494 le32_to_cpu(p->Capability));
495
496 switch (p->Family) {
497 /*
498 * The kernel and wire socket structures have the same
499 * layout and use network byte order but make the
500 * conversion explicit in case either one changes.
501 */
502 case INTERNETWORK:
503 addr4 = (struct sockaddr_in *)&info->sockaddr;
504 p4 = (struct iface_info_ipv4 *)p->Buffer;
505 addr4->sin_family = AF_INET;
506 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
507
508 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
509 addr4->sin_port = cpu_to_be16(CIFS_PORT);
510
511 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
512 &addr4->sin_addr);
513 break;
514 case INTERNETWORKV6:
515 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
516 p6 = (struct iface_info_ipv6 *)p->Buffer;
517 addr6->sin6_family = AF_INET6;
518 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
519
520 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
521 addr6->sin6_flowinfo = 0;
522 addr6->sin6_scope_id = 0;
523 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
524
525 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
526 &addr6->sin6_addr);
527 break;
528 default:
529 cifs_dbg(VFS,
530 "%s: skipping unsupported socket family\n",
531 __func__);
532 goto next_iface;
533 }
534
535 (*iface_count)++;
536 info++;
537next_iface:
538 next = le32_to_cpu(p->Next);
539 if (!next)
540 break;
541 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
542 bytes_left -= next;
543 }
544
545 if (!*iface_count) {
546 rc = -EINVAL;
547 goto out;
548 }
549
550out:
551 if (rc) {
552 kfree(*iface_list);
553 *iface_count = 0;
554 *iface_list = NULL;
555 }
556 return rc;
557}
558
559
/*
 * Query the server's network interfaces via
 * FSCTL_QUERY_NETWORK_INTERFACE_INFO and install the parsed list on the
 * session, replacing (and freeing) any previous list.  Ownership of the
 * parsed iface_list transfers to ses->iface_list on success.
 */
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		/* older servers simply lack this fsctl - not an error */
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	/* swap in the new list under the lock; free the old one */
	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500599
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000600static void
601smb2_close_cached_fid(struct kref *ref)
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000602{
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000603 struct cached_fid *cfid = container_of(ref, struct cached_fid,
604 refcount);
605
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000606 if (cfid->is_valid) {
607 cifs_dbg(FYI, "clear cached root file handle\n");
608 SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
609 cfid->fid->volatile_fid);
610 cfid->is_valid = false;
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000611 cfid->file_all_info_is_valid = false;
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000612 }
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000613}
614
/*
 * Drop one reference to the cached share-root handle; the last put
 * closes the handle via smb2_close_cached_fid().
 */
void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}
621
/*
 * Work item run when the server breaks the lease on the cached root
 * handle: drop our cached reference so the handle gets closed.
 */
void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, lease_break);

	close_shroot(cfid);
}
630
/*
 * Open the directory at the root of a share.
 *
 * Returns (via *pfid) a referenced cached root handle for @tcon,
 * opening it with a compound create+query_info if no valid cached
 * handle exists yet.  The crfid mutex is dropped around the network
 * round trip (see comment below), so a concurrent opener may win the
 * race; in that case our extra handle is closed and the winner's cached
 * handle is returned instead.  Callers release the handle with
 * close_shroot().
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		/* fast path: hand out a reference to the existing handle */
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = 0;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* second request in the compound: query all file info */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_exit;

	smb2_set_related(&rqst[1]);

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);
	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc)
		goto oshr_exit;

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/* extra ref: the cache itself holds the handle alive */
		kref_get(&tcon->crfid.refcount);
		smb2_parse_contexts(server, o_rsp,
				    &oparms.fid->epoch,
				    oparms.fid->lease_key, &oplock, NULL);
	} else
		goto oshr_exit;

	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = 1;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
785
Steve French34f62642013-10-09 02:07:00 -0500786static void
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500787smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
788{
789 int rc;
790 __le16 srch_path = 0; /* Null - open root of share */
791 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
792 struct cifs_open_parms oparms;
793 struct cifs_fid fid;
Steve French3d4ef9a2018-04-25 22:19:09 -0500794 bool no_cached_open = tcon->nohandlecache;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500795
796 oparms.tcon = tcon;
797 oparms.desired_access = FILE_READ_ATTRIBUTES;
798 oparms.disposition = FILE_OPEN;
799 oparms.create_options = 0;
800 oparms.fid = &fid;
801 oparms.reconnect = false;
802
Steve French3d4ef9a2018-04-25 22:19:09 -0500803 if (no_cached_open)
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000804 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
805 NULL);
Steve French3d4ef9a2018-04-25 22:19:09 -0500806 else
807 rc = open_shroot(xid, tcon, &fid);
808
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500809 if (rc)
810 return;
811
Steve Frenchc481e9f2013-10-14 01:21:53 -0500812 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500813
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500814 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
815 FS_ATTRIBUTE_INFORMATION);
816 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
817 FS_DEVICE_INFORMATION);
818 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500819 FS_VOLUME_INFORMATION);
820 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500821 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Steve French3d4ef9a2018-04-25 22:19:09 -0500822 if (no_cached_open)
823 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000824 else
825 close_shroot(&tcon->crfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500826}
827
828static void
Steve French34f62642013-10-09 02:07:00 -0500829smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
830{
831 int rc;
832 __le16 srch_path = 0; /* Null - open root of share */
833 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
834 struct cifs_open_parms oparms;
835 struct cifs_fid fid;
836
837 oparms.tcon = tcon;
838 oparms.desired_access = FILE_READ_ATTRIBUTES;
839 oparms.disposition = FILE_OPEN;
840 oparms.create_options = 0;
841 oparms.fid = &fid;
842 oparms.reconnect = false;
843
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000844 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500845 if (rc)
846 return;
847
Steven French21671142013-10-09 13:36:35 -0500848 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
849 FS_ATTRIBUTE_INFORMATION);
850 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
851 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500852 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -0500853}
854
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400855static int
856smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
857 struct cifs_sb_info *cifs_sb, const char *full_path)
858{
859 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400860 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700861 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400862 struct cifs_open_parms oparms;
863 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400864
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000865 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -0500866 return 0;
867
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400868 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
869 if (!utf16_path)
870 return -ENOMEM;
871
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400872 oparms.tcon = tcon;
873 oparms.desired_access = FILE_READ_ATTRIBUTES;
874 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -0500875 if (backup_cred(cifs_sb))
876 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
877 else
878 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400879 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400880 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400881
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000882 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400883 if (rc) {
884 kfree(utf16_path);
885 return rc;
886 }
887
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400888 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400889 kfree(utf16_path);
890 return rc;
891}
892
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +0400893static int
894smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
895 struct cifs_sb_info *cifs_sb, const char *full_path,
896 u64 *uniqueid, FILE_ALL_INFO *data)
897{
898 *uniqueid = le64_to_cpu(data->IndexNumber);
899 return 0;
900}
901
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700902static int
903smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
904 struct cifs_fid *fid, FILE_ALL_INFO *data)
905{
906 int rc;
907 struct smb2_file_all_info *smb2_data;
908
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +0400909 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700910 GFP_KERNEL);
911 if (smb2_data == NULL)
912 return -ENOMEM;
913
914 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
915 smb2_data);
916 if (!rc)
917 move_smb2_info_to_cifs(data, smb2_data);
918 kfree(smb2_data);
919 return rc;
920}
921
Arnd Bergmann1368f152017-09-05 11:24:15 +0200922#ifdef CONFIG_CIFS_XATTR
/*
 * Convert a wire-format SMB2 FILE_FULL_EA_INFORMATION list (@src,
 * @src_size bytes) into the form the VFS xattr API expects.
 *
 * Two modes:
 *  - @ea_name != NULL (getxattr): locate that one EA and copy its value
 *    into @dst. Returns the value length; @dst_size == 0 means "size
 *    query only"; -ERANGE if @dst is too small; -ENODATA if the name is
 *    not present in the list.
 *  - @ea_name == NULL (listxattr): emit each name as "user.<name>\0"
 *    into @dst and return the total byte count; @dst_size == 0 again
 *    means just compute the required size.
 *
 * Returns a negative errno on malformed input or overflow.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	/* remember the caller's size; dst_size shrinks as we copy out */
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		/* name and value are laid out back to back after the header */
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		/* an empty name terminates the list */
		if (name_len == 0)
			break;

		/* 8 == fixed FILE_FULL_EA_INFORMATION header size */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				/* found it; goto out skips the -ENODATA below */
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* zero next_entry_offset marks the final entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/*
	 * didn't find the named attribute; note this also overrides the
	 * -ERANGE set by a malformed next_entry_offset break above when a
	 * specific name was requested (the found path jumps straight to out)
	 */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1005
1006static ssize_t
1007smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
1008 const unsigned char *path, const unsigned char *ea_name,
1009 char *ea_data, size_t buf_size,
1010 struct cifs_sb_info *cifs_sb)
1011{
1012 int rc;
1013 __le16 *utf16_path;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001014 struct kvec rsp_iov = {NULL, 0};
1015 int buftype = CIFS_NO_BUFFER;
1016 struct smb2_query_info_rsp *rsp;
1017 struct smb2_file_full_ea_info *info = NULL;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001018
1019 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1020 if (!utf16_path)
1021 return -ENOMEM;
1022
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001023 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1024 FILE_READ_EA,
1025 FILE_FULL_EA_INFORMATION,
1026 SMB2_O_INFO_FILE,
Ronnie Sahlbergc4627e62019-01-29 12:46:17 +10001027 CIFSMaxBufSize -
1028 MAX_SMB2_CREATE_RESPONSE_SIZE -
1029 MAX_SMB2_CLOSE_RESPONSE_SIZE,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001030 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001031 if (rc) {
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001032 /*
1033 * If ea_name is NULL (listxattr) and there are no EAs,
1034 * return 0 as it's not an error. Otherwise, the specified
1035 * ea_name was not found.
1036 */
1037 if (!ea_name && rc == -ENODATA)
1038 rc = 0;
1039 goto qeas_exit;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001040 }
1041
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001042 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
1043 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1044 le32_to_cpu(rsp->OutputBufferLength),
1045 &rsp_iov,
1046 sizeof(struct smb2_file_full_ea_info));
1047 if (rc)
1048 goto qeas_exit;
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001049
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001050 info = (struct smb2_file_full_ea_info *)(
1051 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1052 rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
1053 le32_to_cpu(rsp->OutputBufferLength), ea_name);
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001054
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001055 qeas_exit:
1056 kfree(utf16_path);
1057 free_rsp_buf(buftype, rsp_iov.iov_base);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001058 return rc;
1059}
1060
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001061
1062static int
1063smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1064 const char *path, const char *ea_name, const void *ea_value,
1065 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1066 struct cifs_sb_info *cifs_sb)
1067{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001068 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001069 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001070 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001071 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001072 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001073 struct smb_rqst rqst[3];
1074 int resp_buftype[3];
1075 struct kvec rsp_iov[3];
1076 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1077 struct cifs_open_parms oparms;
1078 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1079 struct cifs_fid fid;
1080 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1081 unsigned int size[1];
1082 void *data[1];
1083 struct smb2_file_full_ea_info *ea = NULL;
1084 struct kvec close_iov[1];
1085 int rc;
1086
1087 if (smb3_encryption_required(tcon))
1088 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001089
1090 if (ea_name_len > 255)
1091 return -EINVAL;
1092
1093 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1094 if (!utf16_path)
1095 return -ENOMEM;
1096
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001097 memset(rqst, 0, sizeof(rqst));
1098 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1099 memset(rsp_iov, 0, sizeof(rsp_iov));
1100
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001101 if (ses->server->ops->query_all_EAs) {
1102 if (!ea_value) {
1103 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1104 ea_name, NULL, 0,
1105 cifs_sb);
1106 if (rc == -ENODATA)
1107 goto sea_exit;
1108 }
1109 }
1110
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001111 /* Open */
1112 memset(&open_iov, 0, sizeof(open_iov));
1113 rqst[0].rq_iov = open_iov;
1114 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1115
1116 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001117 oparms.tcon = tcon;
1118 oparms.desired_access = FILE_WRITE_EA;
1119 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001120 if (backup_cred(cifs_sb))
1121 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1122 else
1123 oparms.create_options = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001124 oparms.fid = &fid;
1125 oparms.reconnect = false;
1126
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001127 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
1128 if (rc)
1129 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001130 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001131
1132
1133 /* Set Info */
1134 memset(&si_iov, 0, sizeof(si_iov));
1135 rqst[1].rq_iov = si_iov;
1136 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001137
1138 len = sizeof(ea) + ea_name_len + ea_value_len + 1;
1139 ea = kzalloc(len, GFP_KERNEL);
1140 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001141 rc = -ENOMEM;
1142 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001143 }
1144
1145 ea->ea_name_length = ea_name_len;
1146 ea->ea_value_length = cpu_to_le16(ea_value_len);
1147 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1148 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1149
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001150 size[0] = len;
1151 data[0] = ea;
1152
1153 rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
1154 COMPOUND_FID, current->tgid,
1155 FILE_FULL_EA_INFORMATION,
1156 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001157 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001158 smb2_set_related(&rqst[1]);
1159
1160
1161 /* Close */
1162 memset(&close_iov, 0, sizeof(close_iov));
1163 rqst[2].rq_iov = close_iov;
1164 rqst[2].rq_nvec = 1;
1165 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
1166 smb2_set_related(&rqst[2]);
1167
1168 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1169 resp_buftype, rsp_iov);
1170
1171 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001172 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001173 kfree(utf16_path);
1174 SMB2_open_free(&rqst[0]);
1175 SMB2_set_info_free(&rqst[1]);
1176 SMB2_close_free(&rqst[2]);
1177 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1178 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1179 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001180 return rc;
1181}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001182#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001183
/*
 * ->can_echo op: only send SMB2 echo (keepalive) requests while
 * server->echoes is set; the credit-accounting code (see change_conf)
 * clears it when the server has too few credits to spare one for echo.
 */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1189
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001190static void
1191smb2_clear_stats(struct cifs_tcon *tcon)
1192{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001193 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001194
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001195 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1196 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1197 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1198 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001199}
1200
/*
 * Dump share capability bits, sector flags, share flags, tid, optimal
 * sector size and maximal access for one tcon into a seq_file (the
 * /proc/fs/cifs DebugData output). Output formatting below is exact:
 * flags are comma-terminated and several fields intentionally continue
 * on the same line (no leading \n before "Share Flags" and
 * "Maximal Access").
 */
static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	/* sector flags (from FS_SECTOR_SIZE_INFORMATION) on the same line */
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	/* only meaningful if the server reported a preferred sector size */
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}
1233
1234static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001235smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1236{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001237 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1238 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001239
1240 /*
1241 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1242 * totals (requests sent) since those SMBs are per-session not per tcon
1243 */
Steve French52ce1ac2018-07-31 01:46:47 -05001244 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1245 (long long)(tcon->bytes_read),
1246 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001247 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1248 atomic_read(&tcon->num_local_opens),
1249 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001250 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001251 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1252 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001253 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001254 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1255 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001256 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001257 atomic_read(&sent[SMB2_CREATE_HE]),
1258 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001259 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001260 atomic_read(&sent[SMB2_CLOSE_HE]),
1261 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001262 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001263 atomic_read(&sent[SMB2_FLUSH_HE]),
1264 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001265 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001266 atomic_read(&sent[SMB2_READ_HE]),
1267 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001268 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001269 atomic_read(&sent[SMB2_WRITE_HE]),
1270 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001271 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001272 atomic_read(&sent[SMB2_LOCK_HE]),
1273 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001274 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001275 atomic_read(&sent[SMB2_IOCTL_HE]),
1276 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001277 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001278 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1279 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001280 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001281 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1282 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001283 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001284 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1285 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001286 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001287 atomic_read(&sent[SMB2_SET_INFO_HE]),
1288 atomic_read(&failed[SMB2_SET_INFO_HE]));
1289 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1290 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1291 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001292}
1293
/*
 * ->set_fid op: record the server handle returned by open in the
 * per-open-file state (@cfile) and apply the granted oplock/lease
 * level to the inode's caching state.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
	/* mid of the open request, tracked only for debug builds */
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	/* dialect-specific handler interprets @oplock (oplock vs lease) */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	/* byte-range locks can be cached only with a write-caching level */
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1310
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001311static void
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001312smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1313 struct cifs_fid *fid)
1314{
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001315 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001316}
1317
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001318static int
Steve French41c13582013-11-14 00:05:36 -06001319SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1320 u64 persistent_fid, u64 volatile_fid,
1321 struct copychunk_ioctl *pcchunk)
1322{
1323 int rc;
1324 unsigned int ret_data_len;
1325 struct resume_key_req *res_key;
1326
1327 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1328 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001329 NULL, 0 /* no input */, CIFSMaxBufSize,
Steve French41c13582013-11-14 00:05:36 -06001330 (char **)&res_key, &ret_data_len);
1331
1332 if (rc) {
1333 cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
1334 goto req_res_key_exit;
1335 }
1336 if (ret_data_len < sizeof(struct resume_key_req)) {
1337 cifs_dbg(VFS, "Invalid refcopy resume key length\n");
1338 rc = -EINVAL;
1339 goto req_res_key_exit;
1340 }
1341 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1342
1343req_res_key_exit:
1344 kfree(res_key);
1345 return rc;
1346}
1347
/*
 * Back end for the CIFS query-info passthrough ioctl: build a compounded
 * open -> (query-info | passthrough fsctl) -> close request from the
 * user-supplied struct smb_query_info at @p, send it, and copy the
 * response (clamped to what the server actually returned) back to
 * userspace.
 *
 * @path:   UTF-16 path to open
 * @is_dir: selects CREATE_NOT_FILE (directory) vs CREATE_NOT_DIR (file)
 * @p:      userspace pointer to struct smb_query_info immediately
 *          followed by qi.output_buffer_length bytes of input data
 *
 * Returns 0 on success or a negative errno.
 */
static int
smb2_ioctl_query_info(const unsigned int xid,
		      struct cifs_tcon *tcon,
		      __le16 *path, int is_dir,
		      unsigned long p)
{
	struct cifs_ses *ses = tcon->ses;
	char __user *arg = (char __user *)p;
	struct smb_query_info qi;
	struct smb_query_info __user *pqi;
	int rc = 0;
	int flags = 0;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	struct smb2_ioctl_rsp *io_rsp = NULL;
	void *buffer = NULL;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec qi_iov[1];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
		return -EFAULT;

	/* cap the user-controlled input size */
	if (qi.output_buffer_length > 1024)
		return -EINVAL;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	/* input payload follows the fixed header in the user buffer */
	if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
			   qi.output_buffer_length)) {
		rc = -EFAULT;
		goto iqinf_exit;
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
	oparms.disposition = FILE_OPEN;
	if (is_dir)
		oparms.create_options = CREATE_NOT_FILE;
	else
		oparms.create_options = CREATE_NOT_DIR;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/*
	 * FSCTL codes encode the special access they need in the fsctl code.
	 */
	if (qi.flags & PASSTHRU_FSCTL) {
		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
			oparms.desired_access = GENERIC_ALL;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
			oparms.desired_access = GENERIC_READ;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
			oparms.desired_access = GENERIC_WRITE;
			break;
		}
	}

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query */
	if (qi.flags & PASSTHRU_FSCTL) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN))
			rc = -EPERM;
		else {
			memset(&io_iov, 0, sizeof(io_iov));
			rqst[1].rq_iov = io_iov;
			rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

			/* COMPOUND_FID: use the fid granted by rqst[0] */
			rc = SMB2_ioctl_init(tcon, &rqst[1],
					     COMPOUND_FID, COMPOUND_FID,
					     qi.info_type, true, buffer,
					     qi.output_buffer_length,
					     CIFSMaxBufSize);
		}
	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
		memset(&qi_iov, 0, sizeof(qi_iov));
		rqst[1].rq_iov = qi_iov;
		rqst[1].rq_nvec = 1;

		rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
					  COMPOUND_FID, qi.file_info_class,
					  qi.info_type, qi.additional_information,
					  qi.input_buffer_length,
					  qi.output_buffer_length, buffer);
	} else { /* unknown flags */
		cifs_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
		rc = -EINVAL;
	}

	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto iqinf_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc)
		goto iqinf_exit;
	if (qi.flags & PASSTHRU_FSCTL) {
		pqi = (struct smb_query_info __user *)arg;
		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
		/* clamp copy-out length to what the server returned */
		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
		/* reject responses whose claimed data extends past the iov */
		if (qi.input_buffer_length > 0 &&
		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
		if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
		if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
				 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
				 qi.input_buffer_length)) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
	} else {
		pqi = (struct smb_query_info __user *)arg;
		qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
		if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
		if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
	}

 iqinf_exit:
	kfree(buffer);
	SMB2_open_free(&rqst[0]);
	/* rqst[1] was built by ioctl_init or query_info_init; free to match */
	if (qi.flags & PASSTHRU_FSCTL)
		SMB2_ioctl_free(&rqst[1]);
	else
		SMB2_query_info_free(&rqst[1]);

	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
1542
Sachin Prabhu620d8742017-02-10 16:03:51 +05301543static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001544smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001545 struct cifsFileInfo *srcfile,
1546 struct cifsFileInfo *trgtfile, u64 src_off,
1547 u64 len, u64 dest_off)
1548{
1549 int rc;
1550 unsigned int ret_data_len;
1551 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001552 struct copychunk_ioctl_rsp *retbuf = NULL;
1553 struct cifs_tcon *tcon;
1554 int chunks_copied = 0;
1555 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301556 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001557
1558 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1559
1560 if (pcchunk == NULL)
1561 return -ENOMEM;
1562
Christoph Probsta205d502019-05-08 21:36:25 +02001563 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001564 /* Request a key from the server to identify the source of the copy */
1565 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1566 srcfile->fid.persistent_fid,
1567 srcfile->fid.volatile_fid, pcchunk);
1568
1569 /* Note: request_res_key sets res_key null only if rc !=0 */
1570 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001571 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001572
1573 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001574 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001575 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001576 pcchunk->Reserved2 = 0;
1577
Steve French9bf0c9c2013-11-16 18:05:28 -06001578 tcon = tlink_tcon(trgtfile->tlink);
1579
1580 while (len > 0) {
1581 pcchunk->SourceOffset = cpu_to_le64(src_off);
1582 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1583 pcchunk->Length =
1584 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1585
1586 /* Request server copy to target from src identified by key */
1587 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001588 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001589 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001590 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1591 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001592 if (rc == 0) {
1593 if (ret_data_len !=
1594 sizeof(struct copychunk_ioctl_rsp)) {
1595 cifs_dbg(VFS, "invalid cchunk response size\n");
1596 rc = -EIO;
1597 goto cchunk_out;
1598 }
1599 if (retbuf->TotalBytesWritten == 0) {
1600 cifs_dbg(FYI, "no bytes copied\n");
1601 rc = -EIO;
1602 goto cchunk_out;
1603 }
1604 /*
1605 * Check if server claimed to write more than we asked
1606 */
1607 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1608 le32_to_cpu(pcchunk->Length)) {
1609 cifs_dbg(VFS, "invalid copy chunk response\n");
1610 rc = -EIO;
1611 goto cchunk_out;
1612 }
1613 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
1614 cifs_dbg(VFS, "invalid num chunks written\n");
1615 rc = -EIO;
1616 goto cchunk_out;
1617 }
1618 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001619
Sachin Prabhu620d8742017-02-10 16:03:51 +05301620 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1621 src_off += bytes_written;
1622 dest_off += bytes_written;
1623 len -= bytes_written;
1624 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001625
Sachin Prabhu620d8742017-02-10 16:03:51 +05301626 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001627 le32_to_cpu(retbuf->ChunksWritten),
1628 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301629 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001630 } else if (rc == -EINVAL) {
1631 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1632 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001633
Steve French9bf0c9c2013-11-16 18:05:28 -06001634 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1635 le32_to_cpu(retbuf->ChunksWritten),
1636 le32_to_cpu(retbuf->ChunkBytesWritten),
1637 le32_to_cpu(retbuf->TotalBytesWritten));
1638
1639 /*
1640 * Check if this is the first request using these sizes,
1641 * (ie check if copy succeed once with original sizes
1642 * and check if the server gave us different sizes after
1643 * we already updated max sizes on previous request).
1644 * if not then why is the server returning an error now
1645 */
1646 if ((chunks_copied != 0) || chunk_sizes_updated)
1647 goto cchunk_out;
1648
1649 /* Check that server is not asking us to grow size */
1650 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1651 tcon->max_bytes_chunk)
1652 tcon->max_bytes_chunk =
1653 le32_to_cpu(retbuf->ChunkBytesWritten);
1654 else
1655 goto cchunk_out; /* server gave us bogus size */
1656
1657 /* No need to change MaxChunks since already set to 1 */
1658 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001659 } else
1660 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001661 }
1662
1663cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001664 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001665 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301666 if (rc)
1667 return rc;
1668 else
1669 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001670}
1671
1672static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001673smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1674 struct cifs_fid *fid)
1675{
1676 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1677}
1678
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001679static unsigned int
1680smb2_read_data_offset(char *buf)
1681{
1682 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001683
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001684 return rsp->DataOffset;
1685}
1686
1687static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001688smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001689{
1690 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001691
1692 if (in_remaining)
1693 return le32_to_cpu(rsp->DataRemaining);
1694
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001695 return le32_to_cpu(rsp->DataLength);
1696}
1697
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001698
1699static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001700smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001701 struct cifs_io_parms *parms, unsigned int *bytes_read,
1702 char **buf, int *buf_type)
1703{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001704 parms->persistent_fid = pfid->persistent_fid;
1705 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001706 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1707}
1708
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001709static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001710smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001711 struct cifs_io_parms *parms, unsigned int *written,
1712 struct kvec *iov, unsigned long nr_segs)
1713{
1714
Steve Frenchdb8b6312014-09-22 05:13:55 -05001715 parms->persistent_fid = pfid->persistent_fid;
1716 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001717 return SMB2_write(xid, parms, written, iov, nr_segs);
1718}
1719
/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
/*
 * Returns true when the file already has the requested sparse state or the
 * FSCTL_SET_SPARSE request succeeded; false when the share is known not to
 * support sparse files or the request failed.  On success the cached
 * cifsAttrs are updated to match.
 */
static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
{
	struct cifsInodeInfo *cifsi;
	int rc;

	cifsi = CIFS_I(inode);

	/* if file already sparse don't bother setting sparse again */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
		return true; /* already sparse */

	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
		return true; /* already not sparse */

	/*
	 * Can't check for sparse support on share the usual way via the
	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
	 * since Samba server doesn't set the flag on the share, yet
	 * supports the set sparse FSCTL and returns sparse correctly
	 * in the file attributes. If we fail setting sparse though we
	 * mark that server does not support sparse files for this share
	 * to avoid repeatedly sending the unsupported fsctl to server
	 * if the file is repeatedly extended.
	 */
	if (tcon->broken_sparse_sup)
		return false;

	/* single-byte payload: 1 to set sparse, 0 to clear */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
			true /* is_fctl */,
			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
	if (rc) {
		tcon->broken_sparse_sup = true;
		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
		return false;
	}

	/* keep the cached attribute bits in sync with the server */
	if (setsparse)
		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
	else
		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);

	return true;
}
1766
/*
 * Set the end-of-file of @cfile to @size via SMB2 SET_INFO.
 * @set_alloc: when true, skip the sparse-marking heuristic below
 * (presumably the caller is allocating rather than truncating —
 * the flag is otherwise unused here).
 */
static int
smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
{
	__le64 eof = cpu_to_le64(size);
	struct inode *inode;

	/*
	 * If extending file more than one page make sparse. Many Linux fs
	 * make files sparse by default when extending via ftruncate
	 */
	inode = d_inode(cfile->dentry);

	if (!set_alloc && (size > inode->i_size + 8192)) {
		__u8 set_sparse = 1;

		/* whether set sparse succeeds or not, extend the file */
		smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
	}

	return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
			    cfile->fid.volatile_fid, cfile->pid, &eof);
}
1790
/*
 * Clone @len bytes from @srcfile (at @src_off) into @trgtfile (at
 * @dest_off) using FSCTL_DUPLICATE_EXTENTS_TO_FILE (block-refcounting
 * clone, e.g. on ReFS).  The target is extended first so the cloned
 * range fits.  Returns 0 on success or a negative errno.
 */
static int
smb2_duplicate_extents(const unsigned int xid,
		struct cifsFileInfo *srcfile,
		struct cifsFileInfo *trgtfile, u64 src_off,
		u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* servers advertise duplicate extent support with this flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);

	/* grow the target first so the cloned extents land inside EOF */
	rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
	if (rc)
		goto duplicate_extents_out;

	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			true /* is_fsctl */,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	/* this FSCTL returns no output payload; anything else is unexpected */
	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	return rc;
}
Steve French02b16662015-06-27 21:18:36 -07001834
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001835static int
Steve French64a5cfa2013-10-14 15:31:32 -05001836smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1837 struct cifsFileInfo *cfile)
1838{
1839 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1840 cfile->fid.volatile_fid);
1841}
1842
1843static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001844smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1845 struct cifsFileInfo *cfile)
1846{
1847 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001848 unsigned int ret_data_len;
1849
1850 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1851 integr_info.Flags = 0;
1852 integr_info.Reserved = 0;
1853
1854 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1855 cfile->fid.volatile_fid,
1856 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001857 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001858 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001859 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001860 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001861 &ret_data_len);
1862
1863}
1864
Steve Frenche02789a2018-08-09 14:33:12 -05001865/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1866#define GMT_TOKEN_SIZE 50
1867
Steve French153322f2019-03-28 22:32:49 -05001868#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1869
Steve Frenche02789a2018-08-09 14:33:12 -05001870/*
1871 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1872 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1873 */
/*
 * Enumerate the previous versions (snapshots) of the volume backing
 * @cfile via FSCTL_SRV_ENUMERATE_SNAPSHOTS and copy the resulting
 * SRV_SNAPSHOT_ARRAY (plus GMT tokens) back to the user buffer
 * @ioc_buf.  Returns 0 on success or a negative errno.
 */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, void __user *ioc_buf)
{
	char *retbuf = NULL;
	unsigned int ret_data_len = 0;
	int rc;
	u32 max_response_size;
	struct smb_snapshot_array snapshot_in;

	/*
	 * On the first query to enumerate the list of snapshots available
	 * for this volume the buffer begins with 0 (number of snapshots
	 * which can be returned is zero since at that point we do not know
	 * how big the buffer needs to be). On the second query,
	 * it (ret_data_len) is set to number of snapshots so we can
	 * know to set the maximum response size larger (see below).
	 */
	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
		return -EFAULT;

	/*
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes.
	 */
	if (ret_data_len == 0)
		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
	else
		max_response_size = CIFSMaxBufSize;

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			true /* is_fsctl */,
			NULL, 0 /* no input data */, max_response_size,
			(char **)&retbuf,
			&ret_data_len);
	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
			rc, ret_data_len);
	if (rc)
		return rc;

	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
		/* Fixup buffer */
		if (copy_from_user(&snapshot_in, ioc_buf,
		    sizeof(struct smb_snapshot_array))) {
			rc = -EFAULT;
			kfree(retbuf);
			return rc;
		}

		/*
		 * Check for min size, ie not large enough to fit even one GMT
		 * token (snapshot). On the first ioctl some users may pass in
		 * smaller size (or zero) to simply get the size of the array
		 * so the user space caller can allocate sufficient memory
		 * and retry the ioctl again with larger array size sufficient
		 * to hold all of the snapshot GMT tokens on the second try.
		 */
		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
			ret_data_len = sizeof(struct smb_snapshot_array);

		/*
		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
		 * the snapshot array (of 50 byte GMT tokens) each
		 * representing an available previous version of the data
		 */
		if (ret_data_len > (snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array)))
			ret_data_len = snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array);

		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
			rc = -EFAULT;
	}

	kfree(retbuf);
	return rc;
}
1956
/*
 * Open the directory @path and issue the initial SMB2 QUERY_DIRECTORY.
 * On success the directory stays open (handle returned through @fid) so
 * smb2_query_dir_next() can continue the enumeration; if the query
 * itself fails the handle is closed before returning.
 */
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *path, struct cifs_sb_info *cifs_sb,
		     struct cifs_fid *fid, __u16 search_flags,
		     struct cifs_search_info *srch_inf)
{
	__le16 *utf16_path;
	int rc;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	kfree(utf16_path);
	if (rc) {
		cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
		return rc;
	}

	srch_inf->entries_in_buffer = 0;
	srch_inf->index_of_last_entry = 2;

	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
				  fid->volatile_fid, 0, srch_inf);
	if (rc) {
		cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
		/* don't leak the open handle on query failure */
		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
	}
	return rc;
}
2000
2001static int
2002smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2003 struct cifs_fid *fid, __u16 search_flags,
2004 struct cifs_search_info *srch_inf)
2005{
2006 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2007 fid->volatile_fid, 0, srch_inf);
2008}
2009
2010static int
2011smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2012 struct cifs_fid *fid)
2013{
2014 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2015}
2016
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002017/*
Christoph Probsta205d502019-05-08 21:36:25 +02002018 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2019 * the number of credits and return true. Otherwise - return false.
2020 */
static bool
smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	/* only interim responses carry STATUS_PENDING */
	if (shdr->Status != STATUS_PENDING)
		return false;

	/*
	 * An interim response may still grant credits; bank them under
	 * req_lock and wake any senders waiting for credits.
	 */
	if (shdr->CreditRequest) {
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
	}

	return true;
}
2038
/*
 * Return true if @buf is a response indicating the session was expired
 * or deleted by the server, tracing and logging the event.
 */
static bool
smb2_is_session_expired(char *buf)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
	    shdr->Status != STATUS_USER_SESSION_DELETED)
		return false;

	trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
			       le16_to_cpu(shdr->Command),
			       le64_to_cpu(shdr->MessageId));
	cifs_dbg(FYI, "Session expired or deleted\n");

	return true;
}
2055
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002056static int
2057smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2058 struct cifsInodeInfo *cinode)
2059{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002060 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2061 return SMB2_lease_break(0, tcon, cinode->lease_key,
2062 smb2_get_lease_state(cinode));
2063
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002064 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2065 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002066 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002067}
2068
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002069void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002070smb2_set_related(struct smb_rqst *rqst)
2071{
2072 struct smb2_sync_hdr *shdr;
2073
2074 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002075 if (shdr == NULL) {
2076 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2077 return;
2078 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002079 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2080}
2081
/* zero padding used by smb2_set_next_command() to 8-byte-align compounds */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2083
/*
 * Prepare @rqst to be followed by another request in a compound: pad it
 * to an 8-byte boundary and record its padded length in the header's
 * NextCommand field.  For unencrypted sessions padding is added as an
 * extra iov; for encrypted sessions all iovs are flattened into iov[0]
 * first because the encryption path cannot handle a separate padding iov.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		/* NOTE(review): assumes iov[0]'s buffer has room for the
		 * flattened payload plus padding — provided by the callers'
		 * request construction; confirm when changing buffer sizing.
		 */
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2140
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002141/*
2142 * Passes the query info response back to the caller on success.
2143 * Caller need to free this with free_rsp_buf().
2144 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	/*
	 * Issues a compound open/query-info/close for @utf16_path.
	 * On success the query-info response iov is handed to the caller
	 * through @rsp/@buftype; caller frees it with free_rsp_buf().
	 * @cifs_sb may be NULL (then backup intent is not requested).
	 */
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* request 0: open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	if (cifs_sb && backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* request 1: query info on the handle opened by request 0 */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	/* COMPOUND_FID makes the server use the fid from the chained open */
	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* request 2: close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		goto qic_exit;
	}
	/* transfer ownership of the query-info response to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

	/* open/close responses are always freed here; query-info only on error */
 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2230
/*
 * statfs for SMB2/SMB3 shares: compound open(root) + query
 * FS_FULL_SIZE_INFORMATION + close, then translate the server's
 * full-size info into @buf.
 */
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
	     struct kstatfs *buf)
{
	struct smb2_query_info_rsp *rsp;
	struct smb2_fs_full_size_info *info = NULL;
	__le16 utf16_path = 0; /* Null - open root of share */
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	int rc;


	rc = smb2_query_info_compound(xid, tcon, &utf16_path,
				      FILE_READ_ATTRIBUTES,
				      FS_FULL_SIZE_INFORMATION,
				      SMB2_O_INFO_FILESYSTEM,
				      sizeof(struct smb2_fs_full_size_info),
				      &rsp_iov, &buftype, NULL);
	if (rc)
		goto qfs_exit;

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	buf->f_type = SMB2_MAGIC_NUMBER;
	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	/* validate the returned buffer bounds before copying out of it */
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, buf);

qfs_exit:
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
2267
Steve French2d304212018-06-24 23:28:12 -05002268static int
2269smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2270 struct kstatfs *buf)
2271{
2272 int rc;
2273 __le16 srch_path = 0; /* Null - open root of share */
2274 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2275 struct cifs_open_parms oparms;
2276 struct cifs_fid fid;
2277
2278 if (!tcon->posix_extensions)
2279 return smb2_queryfs(xid, tcon, buf);
2280
2281 oparms.tcon = tcon;
2282 oparms.desired_access = FILE_READ_ATTRIBUTES;
2283 oparms.disposition = FILE_OPEN;
2284 oparms.create_options = 0;
2285 oparms.fid = &fid;
2286 oparms.reconnect = false;
2287
2288 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2289 if (rc)
2290 return rc;
2291
2292 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2293 fid.volatile_fid, buf);
2294 buf->f_type = SMB2_MAGIC_NUMBER;
2295 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2296 return rc;
2297}
Steve French2d304212018-06-24 23:28:12 -05002298
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002299static bool
2300smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2301{
2302 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2303 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2304}
2305
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002306static int
2307smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2308 __u64 length, __u32 type, int lock, int unlock, bool wait)
2309{
2310 if (unlock && !lock)
2311 type = SMB2_LOCKFLAG_UNLOCK;
2312 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2313 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2314 current->tgid, length, offset, type, wait);
2315}
2316
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002317static void
2318smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2319{
2320 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2321}
2322
/* Store the lease key from an open response (@fid) into the inode's cache */
static void
smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
{
	memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
}
2328
/* Generate a fresh random lease key for a new open in @fid */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2334
Aurelien Aptel9d496402017-02-13 16:16:49 +01002335static int
2336smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2337 const char *search_name,
2338 struct dfs_info3_param **target_nodes,
2339 unsigned int *num_of_nodes,
2340 const struct nls_table *nls_codepage, int remap)
2341{
2342 int rc;
2343 __le16 *utf16_path = NULL;
2344 int utf16_path_len = 0;
2345 struct cifs_tcon *tcon;
2346 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2347 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2348 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2349
Christoph Probsta205d502019-05-08 21:36:25 +02002350 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002351
2352 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002353 * Try to use the IPC tcon, otherwise just use any
Aurelien Aptel9d496402017-02-13 16:16:49 +01002354 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002355 tcon = ses->tcon_ipc;
2356 if (tcon == NULL) {
2357 spin_lock(&cifs_tcp_ses_lock);
2358 tcon = list_first_entry_or_null(&ses->tcon_list,
2359 struct cifs_tcon,
2360 tcon_list);
2361 if (tcon)
2362 tcon->tc_count++;
2363 spin_unlock(&cifs_tcp_ses_lock);
2364 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002365
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002366 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002367 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2368 ses);
2369 rc = -ENOTCONN;
2370 goto out;
2371 }
2372
2373 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2374 &utf16_path_len,
2375 nls_codepage, remap);
2376 if (!utf16_path) {
2377 rc = -ENOMEM;
2378 goto out;
2379 }
2380
2381 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2382 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2383 if (!dfs_req) {
2384 rc = -ENOMEM;
2385 goto out;
2386 }
2387
2388 /* Highest DFS referral version understood */
2389 dfs_req->MaxReferralLevel = DFS_VERSION;
2390
2391 /* Path to resolve in an UTF-16 null-terminated string */
2392 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2393
2394 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002395 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2396 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002397 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002398 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002399 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002400 } while (rc == -EAGAIN);
2401
2402 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002403 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Christoph Probsta205d502019-05-08 21:36:25 +02002404 cifs_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002405 goto out;
2406 }
2407
2408 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2409 num_of_nodes, target_nodes,
2410 nls_codepage, remap, search_name,
2411 true /* is_unicode */);
2412 if (rc) {
Christoph Probsta205d502019-05-08 21:36:25 +02002413 cifs_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002414 goto out;
2415 }
2416
2417 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002418 if (tcon && !tcon->ipc) {
2419 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002420 spin_lock(&cifs_tcp_ses_lock);
2421 tcon->tc_count--;
2422 spin_unlock(&cifs_tcp_ses_lock);
2423 }
2424 kfree(utf16_path);
2425 kfree(dfs_req);
2426 kfree(dfs_rsp);
2427 return rc;
2428}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002429
2430static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002431parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2432 u32 plen, char **target_path,
2433 struct cifs_sb_info *cifs_sb)
2434{
2435 unsigned int len;
2436
2437 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2438 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2439
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002440 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2441 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2442 le64_to_cpu(symlink_buf->InodeType));
2443 return -EOPNOTSUPP;
2444 }
2445
2446 *target_path = cifs_strndup_from_utf16(
2447 symlink_buf->PathBuffer,
2448 len, true, cifs_sb->local_nls);
2449 if (!(*target_path))
2450 return -ENOMEM;
2451
2452 convert_delimiter(*target_path, '/');
2453 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2454
2455 return 0;
2456}
2457
/*
 * Parse a Windows symlink (IO_REPARSE_TAG_SYMLINK) reparse point buffer
 * and return the substitute-name target in *@target_path (kmalloc'd,
 * caller frees, delimiters converted to '/').
 *
 * @symlink_buf: reparse data, starting at the reparse buffer header
 * @plen:	 number of valid bytes at @symlink_buf
 *
 * Returns 0 on success, -EIO for a malformed buffer, or -ENOMEM.
 */
static int
parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
		       u32 plen, char **target_path,
		       struct cifs_sb_info *cifs_sb)
{
	unsigned int sub_len;
	unsigned int sub_offset;

	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */

	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
	/*
	 * 20 is the offset of PathBuffer within the reparse buffer:
	 * 8-byte reparse header + four u16 name offset/length fields +
	 * 4-byte Flags. The substitute name must lie entirely within
	 * the @plen bytes actually received from the server.
	 */
	if (sub_offset + 20 > plen ||
	    sub_offset + sub_len + 20 > plen) {
		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
		return -EIO;
	}

	*target_path = cifs_strndup_from_utf16(
				symlink_buf->PathBuffer + sub_offset,
				sub_len, true, cifs_sb->local_nls);
	if (!(*target_path))
		return -ENOMEM;

	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	return 0;
}
2487
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002488static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002489parse_reparse_point(struct reparse_data_buffer *buf,
2490 u32 plen, char **target_path,
2491 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002492{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002493 if (plen < sizeof(struct reparse_data_buffer)) {
2494 cifs_dbg(VFS, "reparse buffer is too small. Must be "
2495 "at least 8 bytes but was %d\n", plen);
2496 return -EIO;
2497 }
2498
2499 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2500 sizeof(struct reparse_data_buffer)) {
2501 cifs_dbg(VFS, "srv returned invalid reparse buf "
2502 "length: %d\n", plen);
2503 return -EIO;
2504 }
2505
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002506 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002507 switch (le32_to_cpu(buf->ReparseTag)) {
2508 case IO_REPARSE_TAG_NFS:
2509 return parse_reparse_posix(
2510 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002511 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002512 case IO_REPARSE_TAG_SYMLINK:
2513 return parse_reparse_symlink(
2514 (struct reparse_symlink_data_buffer *)buf,
2515 plen, target_path, cifs_sb);
2516 default:
2517 cifs_dbg(VFS, "srv returned unknown symlink buffer "
2518 "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
2519 return -EOPNOTSUPP;
2520 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002521}
2522
Pavel Shilovsky78932422016-07-24 10:37:38 +03002523#define SMB2_SYMLINK_STRUCT_SIZE \
2524 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2525
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002526static int
2527smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002528 struct cifs_sb_info *cifs_sb, const char *full_path,
2529 char **target_path, bool is_reparse_point)
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002530{
2531 int rc;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002532 __le16 *utf16_path = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002533 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2534 struct cifs_open_parms oparms;
2535 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002536 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002537 struct smb2_err_rsp *err_buf = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002538 struct smb2_symlink_err_rsp *symlink;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002539 unsigned int sub_len;
2540 unsigned int sub_offset;
2541 unsigned int print_len;
2542 unsigned int print_offset;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002543 int flags = 0;
2544 struct smb_rqst rqst[3];
2545 int resp_buftype[3];
2546 struct kvec rsp_iov[3];
2547 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2548 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
2549 struct kvec close_iov[1];
2550 struct smb2_create_rsp *create_rsp;
2551 struct smb2_ioctl_rsp *ioctl_rsp;
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002552 struct reparse_data_buffer *reparse_buf;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002553 u32 plen;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002554
2555 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2556
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002557 *target_path = NULL;
2558
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002559 if (smb3_encryption_required(tcon))
2560 flags |= CIFS_TRANSFORM_REQ;
2561
2562 memset(rqst, 0, sizeof(rqst));
2563 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2564 memset(rsp_iov, 0, sizeof(rsp_iov));
2565
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002566 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2567 if (!utf16_path)
2568 return -ENOMEM;
2569
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002570 /* Open */
2571 memset(&open_iov, 0, sizeof(open_iov));
2572 rqst[0].rq_iov = open_iov;
2573 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2574
2575 memset(&oparms, 0, sizeof(oparms));
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002576 oparms.tcon = tcon;
2577 oparms.desired_access = FILE_READ_ATTRIBUTES;
2578 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002579
Steve French5e196972018-08-27 17:04:13 -05002580 if (backup_cred(cifs_sb))
2581 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2582 else
2583 oparms.create_options = 0;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002584 if (is_reparse_point)
2585 oparms.create_options = OPEN_REPARSE_POINT;
2586
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002587 oparms.fid = &fid;
2588 oparms.reconnect = false;
2589
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002590 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
2591 if (rc)
2592 goto querty_exit;
2593 smb2_set_next_command(tcon, &rqst[0]);
2594
2595
2596 /* IOCTL */
2597 memset(&io_iov, 0, sizeof(io_iov));
2598 rqst[1].rq_iov = io_iov;
2599 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
2600
2601 rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
2602 fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
2603 true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
2604 if (rc)
2605 goto querty_exit;
2606
2607 smb2_set_next_command(tcon, &rqst[1]);
2608 smb2_set_related(&rqst[1]);
2609
2610
2611 /* Close */
2612 memset(&close_iov, 0, sizeof(close_iov));
2613 rqst[2].rq_iov = close_iov;
2614 rqst[2].rq_nvec = 1;
2615
2616 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2617 if (rc)
2618 goto querty_exit;
2619
2620 smb2_set_related(&rqst[2]);
2621
2622 rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
2623 resp_buftype, rsp_iov);
2624
2625 create_rsp = rsp_iov[0].iov_base;
2626 if (create_rsp && create_rsp->sync_hdr.Status)
2627 err_iov = rsp_iov[0];
2628 ioctl_rsp = rsp_iov[1].iov_base;
2629
2630 /*
2631 * Open was successful and we got an ioctl response.
2632 */
2633 if ((rc == 0) && (is_reparse_point)) {
2634 /* See MS-FSCC 2.3.23 */
2635
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002636 reparse_buf = (struct reparse_data_buffer *)
2637 ((char *)ioctl_rsp +
2638 le32_to_cpu(ioctl_rsp->OutputOffset));
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002639 plen = le32_to_cpu(ioctl_rsp->OutputCount);
2640
2641 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
2642 rsp_iov[1].iov_len) {
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002643 cifs_dbg(VFS, "srv returned invalid ioctl len: %d\n",
2644 plen);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002645 rc = -EIO;
2646 goto querty_exit;
2647 }
2648
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002649 rc = parse_reparse_point(reparse_buf, plen, target_path,
2650 cifs_sb);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002651 goto querty_exit;
2652 }
2653
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002654 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002655 rc = -ENOENT;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002656 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002657 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002658
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002659 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002660 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002661 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002662 rc = -EINVAL;
2663 goto querty_exit;
2664 }
2665
2666 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2667 if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
2668 le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
2669 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002670 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002671 }
2672
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002673 /* open must fail on symlink - reset rc */
2674 rc = 0;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002675 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2676 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002677 print_len = le16_to_cpu(symlink->PrintNameLength);
2678 print_offset = le16_to_cpu(symlink->PrintNameOffset);
2679
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002680 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002681 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002682 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002683 }
2684
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002685 if (err_iov.iov_len <
2686 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002687 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002688 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002689 }
2690
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002691 *target_path = cifs_strndup_from_utf16(
2692 (char *)symlink->PathBuffer + sub_offset,
2693 sub_len, true, cifs_sb->local_nls);
2694 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002695 rc = -ENOMEM;
2696 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002697 }
2698 convert_delimiter(*target_path, '/');
2699 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002700
2701 querty_exit:
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002702 cifs_dbg(FYI, "query symlink rc %d\n", rc);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002703 kfree(utf16_path);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002704 SMB2_open_free(&rqst[0]);
2705 SMB2_ioctl_free(&rqst[1]);
2706 SMB2_close_free(&rqst[2]);
2707 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2708 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2709 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002710 return rc;
2711}
2712
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002713static struct cifs_ntsd *
2714get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2715 const struct cifs_fid *cifsfid, u32 *pacllen)
2716{
2717 struct cifs_ntsd *pntsd = NULL;
2718 unsigned int xid;
2719 int rc = -EOPNOTSUPP;
2720 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2721
2722 if (IS_ERR(tlink))
2723 return ERR_CAST(tlink);
2724
2725 xid = get_xid();
2726 cifs_dbg(FYI, "trying to get acl\n");
2727
2728 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2729 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2730 free_xid(xid);
2731
2732 cifs_put_tlink(tlink);
2733
2734 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2735 if (rc)
2736 return ERR_PTR(rc);
2737 return pntsd;
2738
2739}
2740
2741static struct cifs_ntsd *
2742get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2743 const char *path, u32 *pacllen)
2744{
2745 struct cifs_ntsd *pntsd = NULL;
2746 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2747 unsigned int xid;
2748 int rc;
2749 struct cifs_tcon *tcon;
2750 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2751 struct cifs_fid fid;
2752 struct cifs_open_parms oparms;
2753 __le16 *utf16_path;
2754
2755 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2756 if (IS_ERR(tlink))
2757 return ERR_CAST(tlink);
2758
2759 tcon = tlink_tcon(tlink);
2760 xid = get_xid();
2761
2762 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002763 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002764 else
2765 oparms.create_options = 0;
2766
2767 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002768 if (!utf16_path) {
2769 rc = -ENOMEM;
2770 free_xid(xid);
2771 return ERR_PTR(rc);
2772 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002773
2774 oparms.tcon = tcon;
2775 oparms.desired_access = READ_CONTROL;
2776 oparms.disposition = FILE_OPEN;
2777 oparms.fid = &fid;
2778 oparms.reconnect = false;
2779
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002780 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002781 kfree(utf16_path);
2782 if (!rc) {
2783 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2784 fid.volatile_fid, (void **)&pntsd, pacllen);
2785 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2786 }
2787
2788 cifs_put_tlink(tlink);
2789 free_xid(xid);
2790
2791 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2792 if (rc)
2793 return ERR_PTR(rc);
2794 return pntsd;
2795}
2796
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002797static int
2798set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2799 struct inode *inode, const char *path, int aclflag)
2800{
2801 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2802 unsigned int xid;
2803 int rc, access_flags = 0;
2804 struct cifs_tcon *tcon;
2805 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2806 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2807 struct cifs_fid fid;
2808 struct cifs_open_parms oparms;
2809 __le16 *utf16_path;
2810
2811 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2812 if (IS_ERR(tlink))
2813 return PTR_ERR(tlink);
2814
2815 tcon = tlink_tcon(tlink);
2816 xid = get_xid();
2817
2818 if (backup_cred(cifs_sb))
2819 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2820 else
2821 oparms.create_options = 0;
2822
2823 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2824 access_flags = WRITE_OWNER;
2825 else
2826 access_flags = WRITE_DAC;
2827
2828 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002829 if (!utf16_path) {
2830 rc = -ENOMEM;
2831 free_xid(xid);
2832 return rc;
2833 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002834
2835 oparms.tcon = tcon;
2836 oparms.desired_access = access_flags;
2837 oparms.disposition = FILE_OPEN;
2838 oparms.path = path;
2839 oparms.fid = &fid;
2840 oparms.reconnect = false;
2841
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002842 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002843 kfree(utf16_path);
2844 if (!rc) {
2845 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2846 fid.volatile_fid, pnntsd, acllen, aclflag);
2847 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2848 }
2849
2850 cifs_put_tlink(tlink);
2851 free_xid(xid);
2852 return rc;
2853}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002854
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002855/* Retrieve an ACL from the server */
2856static struct cifs_ntsd *
2857get_smb2_acl(struct cifs_sb_info *cifs_sb,
2858 struct inode *inode, const char *path,
2859 u32 *pacllen)
2860{
2861 struct cifs_ntsd *pntsd = NULL;
2862 struct cifsFileInfo *open_file = NULL;
2863
2864 if (inode)
2865 open_file = find_readable_file(CIFS_I(inode), true);
2866 if (!open_file)
2867 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2868
2869 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2870 cifsFileInfo_put(open_file);
2871 return pntsd;
2872}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002873
Steve French30175622014-08-17 18:16:40 -05002874static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2875 loff_t offset, loff_t len, bool keep_size)
2876{
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002877 struct cifs_ses *ses = tcon->ses;
Steve French30175622014-08-17 18:16:40 -05002878 struct inode *inode;
2879 struct cifsInodeInfo *cifsi;
2880 struct cifsFileInfo *cfile = file->private_data;
2881 struct file_zero_data_information fsctl_buf;
2882 long rc;
2883 unsigned int xid;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002884 __le64 eof;
Steve French30175622014-08-17 18:16:40 -05002885
2886 xid = get_xid();
2887
David Howells2b0143b2015-03-17 22:25:59 +00002888 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05002889 cifsi = CIFS_I(inode);
2890
Christoph Probsta205d502019-05-08 21:36:25 +02002891 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
Steve French779ede02019-03-13 01:41:49 -05002892 ses->Suid, offset, len);
2893
2894
Steve French30175622014-08-17 18:16:40 -05002895 /* if file not oplocked can't be sure whether asking to extend size */
2896 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002897 if (keep_size == false) {
2898 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002899 trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
2900 tcon->tid, ses->Suid, offset, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002901 free_xid(xid);
2902 return rc;
2903 }
Steve French30175622014-08-17 18:16:40 -05002904
Steve Frenchd1c35af2019-05-09 00:09:37 -05002905 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French30175622014-08-17 18:16:40 -05002906
2907 fsctl_buf.FileOffset = cpu_to_le64(offset);
2908 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2909
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002910 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2911 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
2912 (char *)&fsctl_buf,
2913 sizeof(struct file_zero_data_information),
2914 0, NULL, NULL);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002915 if (rc)
2916 goto zero_range_exit;
2917
2918 /*
2919 * do we also need to change the size of the file?
2920 */
2921 if (keep_size == false && i_size_read(inode) < offset + len) {
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002922 eof = cpu_to_le64(offset + len);
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002923 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2924 cfile->fid.volatile_fid, cfile->pid, &eof);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002925 }
2926
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002927 zero_range_exit:
Steve French30175622014-08-17 18:16:40 -05002928 free_xid(xid);
Steve French779ede02019-03-13 01:41:49 -05002929 if (rc)
2930 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
2931 ses->Suid, offset, len, rc);
2932 else
2933 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
2934 ses->Suid, offset, len);
Steve French30175622014-08-17 18:16:40 -05002935 return rc;
2936}
2937
Steve French31742c52014-08-17 08:38:47 -05002938static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2939 loff_t offset, loff_t len)
2940{
2941 struct inode *inode;
2942 struct cifsInodeInfo *cifsi;
2943 struct cifsFileInfo *cfile = file->private_data;
2944 struct file_zero_data_information fsctl_buf;
2945 long rc;
2946 unsigned int xid;
2947 __u8 set_sparse = 1;
2948
2949 xid = get_xid();
2950
David Howells2b0143b2015-03-17 22:25:59 +00002951 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002952 cifsi = CIFS_I(inode);
2953
2954 /* Need to make file sparse, if not already, before freeing range. */
2955 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002956 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2957 rc = -EOPNOTSUPP;
2958 free_xid(xid);
2959 return rc;
2960 }
Steve French31742c52014-08-17 08:38:47 -05002961
Christoph Probsta205d502019-05-08 21:36:25 +02002962 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05002963
2964 fsctl_buf.FileOffset = cpu_to_le64(offset);
2965 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2966
2967 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2968 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002969 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05002970 sizeof(struct file_zero_data_information),
2971 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05002972 free_xid(xid);
2973 return rc;
2974}
2975
Steve French9ccf3212014-10-18 17:01:15 -05002976static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
2977 loff_t off, loff_t len, bool keep_size)
2978{
2979 struct inode *inode;
2980 struct cifsInodeInfo *cifsi;
2981 struct cifsFileInfo *cfile = file->private_data;
2982 long rc = -EOPNOTSUPP;
2983 unsigned int xid;
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10002984 __le64 eof;
Steve French9ccf3212014-10-18 17:01:15 -05002985
2986 xid = get_xid();
2987
David Howells2b0143b2015-03-17 22:25:59 +00002988 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05002989 cifsi = CIFS_I(inode);
2990
Steve French779ede02019-03-13 01:41:49 -05002991 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
2992 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05002993 /* if file not oplocked can't be sure whether asking to extend size */
2994 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002995 if (keep_size == false) {
Steve French779ede02019-03-13 01:41:49 -05002996 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
2997 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002998 free_xid(xid);
2999 return rc;
3000 }
Steve French9ccf3212014-10-18 17:01:15 -05003001
3002 /*
3003 * Files are non-sparse by default so falloc may be a no-op
3004 * Must check if file sparse. If not sparse, and not extending
3005 * then no need to do anything since file already allocated
3006 */
3007 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
3008 if (keep_size == true)
Steve Frenchcfe89092018-05-19 02:04:55 -05003009 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05003010 /* check if extending file */
3011 else if (i_size_read(inode) >= off + len)
3012 /* not extending file and already not sparse */
Steve Frenchcfe89092018-05-19 02:04:55 -05003013 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05003014 /* BB: in future add else clause to extend file */
3015 else
Steve Frenchcfe89092018-05-19 02:04:55 -05003016 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05003017 if (rc)
3018 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3019 tcon->tid, tcon->ses->Suid, off, len, rc);
3020 else
3021 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
3022 tcon->tid, tcon->ses->Suid, off, len);
Steve Frenchcfe89092018-05-19 02:04:55 -05003023 free_xid(xid);
3024 return rc;
Steve French9ccf3212014-10-18 17:01:15 -05003025 }
3026
3027 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
3028 /*
3029 * Check if falloc starts within first few pages of file
3030 * and ends within a few pages of the end of file to
3031 * ensure that most of file is being forced to be
3032 * fallocated now. If so then setting whole file sparse
3033 * ie potentially making a few extra pages at the beginning
3034 * or end of the file non-sparse via set_sparse is harmless.
3035 */
Steve Frenchcfe89092018-05-19 02:04:55 -05003036 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3037 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05003038 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3039 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05003040 free_xid(xid);
3041 return rc;
3042 }
Steve French9ccf3212014-10-18 17:01:15 -05003043
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10003044 smb2_set_sparse(xid, tcon, cfile, inode, false);
3045 rc = 0;
3046 } else {
3047 smb2_set_sparse(xid, tcon, cfile, inode, false);
3048 rc = 0;
3049 if (i_size_read(inode) < off + len) {
3050 eof = cpu_to_le64(off + len);
3051 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3052 cfile->fid.volatile_fid, cfile->pid,
3053 &eof);
3054 }
Steve French9ccf3212014-10-18 17:01:15 -05003055 }
Steve French9ccf3212014-10-18 17:01:15 -05003056
Steve French779ede02019-03-13 01:41:49 -05003057 if (rc)
3058 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3059 tcon->ses->Suid, off, len, rc);
3060 else
3061 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3062 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003063
3064 free_xid(xid);
3065 return rc;
3066}
3067
/*
 * llseek handler implementing SEEK_HOLE/SEEK_DATA via the server-side
 * FSCTL_QUERY_ALLOCATED_RANGES ioctl.  Any other whence value falls back
 * to generic_file_llseek().  Returns the new position via vfs_setpos()
 * on success or a negative errno (-ENXIO past EOF / no data found).
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, false);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* non-sparse files have no holes: all data up to EOF, hole at EOF */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	/* query allocated ranges from the requested offset up to EOF */
	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	/*
	 * -E2BIG means more ranges exist than fit in the single-entry reply
	 * we asked for; the first range is still valid, which is all we need.
	 */
	if (rc == -E2BIG)
		rc = 0;
	if (rc)
		goto lseek_exit;

	/* no allocated ranges past offset: everything is a hole */
	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		/* first allocated range at or after offset is the data */
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: if offset precedes the first range, it is already in a hole */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	/* otherwise the hole starts right after the first allocated range */
	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
3151
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003152static int smb3_fiemap(struct cifs_tcon *tcon,
3153 struct cifsFileInfo *cfile,
3154 struct fiemap_extent_info *fei, u64 start, u64 len)
3155{
3156 unsigned int xid;
3157 struct file_allocated_range_buffer in_data, *out_data;
3158 u32 out_data_len;
3159 int i, num, rc, flags, last_blob;
3160 u64 next;
3161
3162 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
3163 return -EBADR;
3164
3165 xid = get_xid();
3166 again:
3167 in_data.file_offset = cpu_to_le64(start);
3168 in_data.length = cpu_to_le64(len);
3169
3170 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3171 cfile->fid.volatile_fid,
3172 FSCTL_QUERY_ALLOCATED_RANGES, true,
3173 (char *)&in_data, sizeof(in_data),
3174 1024 * sizeof(struct file_allocated_range_buffer),
3175 (char **)&out_data, &out_data_len);
3176 if (rc == -E2BIG) {
3177 last_blob = 0;
3178 rc = 0;
3179 } else
3180 last_blob = 1;
3181 if (rc)
3182 goto out;
3183
3184 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3185 rc = -EINVAL;
3186 goto out;
3187 }
3188 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3189 rc = -EINVAL;
3190 goto out;
3191 }
3192
3193 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3194 for (i = 0; i < num; i++) {
3195 flags = 0;
3196 if (i == num - 1 && last_blob)
3197 flags |= FIEMAP_EXTENT_LAST;
3198
3199 rc = fiemap_fill_next_extent(fei,
3200 le64_to_cpu(out_data[i].file_offset),
3201 le64_to_cpu(out_data[i].file_offset),
3202 le64_to_cpu(out_data[i].length),
3203 flags);
3204 if (rc < 0)
3205 goto out;
3206 if (rc == 1) {
3207 rc = 0;
3208 goto out;
3209 }
3210 }
3211
3212 if (!last_blob) {
3213 next = le64_to_cpu(out_data[num - 1].file_offset) +
3214 le64_to_cpu(out_data[num - 1].length);
3215 len = len - (next - start);
3216 start = next;
3217 goto again;
3218 }
3219
3220 out:
3221 free_xid(xid);
3222 kfree(out_data);
3223 return rc;
3224}
Steve French9ccf3212014-10-18 17:01:15 -05003225
Steve French31742c52014-08-17 08:38:47 -05003226static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3227 loff_t off, loff_t len)
3228{
3229 /* KEEP_SIZE already checked for by do_fallocate */
3230 if (mode & FALLOC_FL_PUNCH_HOLE)
3231 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003232 else if (mode & FALLOC_FL_ZERO_RANGE) {
3233 if (mode & FALLOC_FL_KEEP_SIZE)
3234 return smb3_zero_range(file, tcon, off, len, true);
3235 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003236 } else if (mode == FALLOC_FL_KEEP_SIZE)
3237 return smb3_simple_falloc(file, tcon, off, len, true);
3238 else if (mode == 0)
3239 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003240
3241 return -EOPNOTSUPP;
3242}
3243
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003244static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003245smb2_downgrade_oplock(struct TCP_Server_Info *server,
3246 struct cifsInodeInfo *cinode, bool set_level2)
3247{
3248 if (set_level2)
3249 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
3250 0, NULL);
3251 else
3252 server->ops->set_oplock_level(cinode, 0, 0, NULL);
3253}
3254
3255static void
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003256smb21_downgrade_oplock(struct TCP_Server_Info *server,
3257 struct cifsInodeInfo *cinode, bool set_level2)
3258{
3259 server->ops->set_oplock_level(cinode,
3260 set_level2 ? SMB2_LEASE_READ_CACHING_HE :
3261 0, 0, NULL);
3262}
3263
3264static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003265smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3266 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003267{
3268 oplock &= 0xFF;
3269 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3270 return;
3271 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003272 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003273 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3274 &cinode->vfs_inode);
3275 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003276 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003277 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3278 &cinode->vfs_inode);
3279 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3280 cinode->oplock = CIFS_CACHE_READ_FLG;
3281 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3282 &cinode->vfs_inode);
3283 } else
3284 cinode->oplock = 0;
3285}
3286
3287static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003288smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3289 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003290{
3291 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003292 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003293
3294 oplock &= 0xFF;
3295 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3296 return;
3297
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003298 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003299 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003300 strcat(message, "R");
3301 }
3302 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003303 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003304 strcat(message, "H");
3305 }
3306 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003307 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003308 strcat(message, "W");
3309 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003310 if (!new_oplock)
3311 strncpy(message, "None", sizeof(message));
3312
3313 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003314 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3315 &cinode->vfs_inode);
3316}
3317
/*
 * SMB3 lease handling: apply the new lease state via
 * smb21_set_oplock_level(), then decide from the (old state, new state,
 * epoch delta) combination whether the page cache must be purged.
 * The epoch is bumped by the server for every state change it makes on
 * our behalf; a larger-than-expected delta while we held only read (or
 * read+handle) caching means another client may have written the file,
 * so cached pages can be stale.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	if (purge_cache) {
		*purge_cache = false;
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			/* R -> R/none: any epoch bump means a missed change;
			   R -> RH/RHW: one bump is the expected upgrade */
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
3352
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003353static bool
3354smb2_is_read_op(__u32 oplock)
3355{
3356 return oplock == SMB2_OPLOCK_LEVEL_II;
3357}
3358
3359static bool
3360smb21_is_read_op(__u32 oplock)
3361{
3362 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3363 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3364}
3365
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003366static __le32
3367map_oplock_to_lease(u8 oplock)
3368{
3369 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3370 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3371 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3372 return SMB2_LEASE_READ_CACHING;
3373 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3374 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3375 SMB2_LEASE_WRITE_CACHING;
3376 return 0;
3377}
3378
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003379static char *
3380smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3381{
3382 struct create_lease *buf;
3383
3384 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3385 if (!buf)
3386 return NULL;
3387
Stefano Brivio729c0c92018-07-05 15:10:02 +02003388 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003389 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003390
3391 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3392 (struct create_lease, lcontext));
3393 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3394 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3395 (struct create_lease, Name));
3396 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003397 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003398 buf->Name[0] = 'R';
3399 buf->Name[1] = 'q';
3400 buf->Name[2] = 'L';
3401 buf->Name[3] = 's';
3402 return (char *)buf;
3403}
3404
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003405static char *
3406smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3407{
3408 struct create_lease_v2 *buf;
3409
3410 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3411 if (!buf)
3412 return NULL;
3413
Stefano Brivio729c0c92018-07-05 15:10:02 +02003414 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003415 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3416
3417 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3418 (struct create_lease_v2, lcontext));
3419 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3420 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3421 (struct create_lease_v2, Name));
3422 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003423 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003424 buf->Name[0] = 'R';
3425 buf->Name[1] = 'q';
3426 buf->Name[2] = 'L';
3427 buf->Name[3] = 's';
3428 return (char *)buf;
3429}
3430
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003431static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003432smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003433{
3434 struct create_lease *lc = (struct create_lease *)buf;
3435
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003436 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003437 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3438 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3439 return le32_to_cpu(lc->lcontext.LeaseState);
3440}
3441
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003442static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003443smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003444{
3445 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3446
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003447 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003448 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3449 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003450 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003451 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003452 return le32_to_cpu(lc->lcontext.LeaseState);
3453}
3454
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003455static unsigned int
3456smb2_wp_retry_size(struct inode *inode)
3457{
3458 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3459 SMB2_MAX_BUFFER_SIZE);
3460}
3461
Pavel Shilovsky52755802014-08-18 20:49:57 +04003462static bool
3463smb2_dir_needs_close(struct cifsFileInfo *cfile)
3464{
3465 return !cfile->invalidHandle;
3466}
3467
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003468static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003469fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05003470 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003471{
3472 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003473 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003474
3475 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3476 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3477 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3478 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French2b2f7542019-06-07 15:16:10 -05003479 if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3480 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3481 else
3482 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003483 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003484}
3485
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	struct page *pg = virt_to_page(buf);
	unsigned int off = offset_in_page(buf);

	sg_set_page(sg, pg, buflen, off);
}
3494
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0] is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds one flat scatterlist covering every iov and page of every
 * request, plus one trailing entry for the signature.  Returns the
 * kmalloc'ed scatterlist (caller frees) or NULL on OOM.
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and per page, + 1 for the trailing signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* final entry receives (enc) or supplies (dec) the signature */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
3542
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003543static int
3544smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3545{
3546 struct cifs_ses *ses;
3547 u8 *ses_enc_key;
3548
3549 spin_lock(&cifs_tcp_ses_lock);
3550 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3551 if (ses->Suid != ses_id)
3552 continue;
3553 ses_enc_key = enc ? ses->smb3encryptionkey :
3554 ses->smb3decryptionkey;
3555 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3556 spin_unlock(&cifs_tcp_ses_lock);
3557 return 0;
3558 }
3559 spin_unlock(&cifs_tcp_ses_lock);
3560
3561 return 1;
3562}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003563/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003564 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3565 * iov[0] - transform header (associate data),
3566 * iov[1-N] - SMB2 header and pages - data to encrypt.
3567 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003568 * untouched.
3569 */
3570static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003571crypt_message(struct TCP_Server_Info *server, int num_rqst,
3572 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003573{
3574 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003575 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003576 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003577 int rc = 0;
3578 struct scatterlist *sg;
3579 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003580 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003581 struct aead_request *req;
3582 char *iv;
3583 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003584 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003585 struct crypto_aead *tfm;
3586 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3587
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003588 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3589 if (rc) {
3590 cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
3591 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003592 return 0;
3593 }
3594
3595 rc = smb3_crypto_aead_allocate(server);
3596 if (rc) {
3597 cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
3598 return rc;
3599 }
3600
3601 tfm = enc ? server->secmech.ccmaesencrypt :
3602 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003603 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003604 if (rc) {
3605 cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
3606 return rc;
3607 }
3608
3609 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3610 if (rc) {
3611 cifs_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
3612 return rc;
3613 }
3614
3615 req = aead_request_alloc(tfm, GFP_KERNEL);
3616 if (!req) {
Christoph Probsta205d502019-05-08 21:36:25 +02003617 cifs_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003618 return -ENOMEM;
3619 }
3620
3621 if (!enc) {
3622 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3623 crypt_len += SMB2_SIGNATURE_SIZE;
3624 }
3625
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003626 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003627 if (!sg) {
Christoph Probsta205d502019-05-08 21:36:25 +02003628 cifs_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003629 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003630 goto free_req;
3631 }
3632
3633 iv_len = crypto_aead_ivsize(tfm);
3634 iv = kzalloc(iv_len, GFP_KERNEL);
3635 if (!iv) {
Christoph Probsta205d502019-05-08 21:36:25 +02003636 cifs_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003637 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003638 goto free_sg;
3639 }
Steve French2b2f7542019-06-07 15:16:10 -05003640
3641 if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3642 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3643 else {
3644 iv[0] = 3;
3645 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
3646 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003647
3648 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3649 aead_request_set_ad(req, assoc_data_len);
3650
3651 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003652 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003653
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003654 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3655 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003656
3657 if (!rc && enc)
3658 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3659
3660 kfree(iv);
3661free_sg:
3662 kfree(sg);
3663free_req:
3664 kfree(req);
3665 return rc;
3666}
3667
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003668void
3669smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003670{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003671 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003672
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003673 for (i = 0; i < num_rqst; i++) {
3674 if (rqst[i].rq_pages) {
3675 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3676 put_page(rqst[i].rq_pages[j]);
3677 kfree(rqst[i].rq_pages);
3678 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003679 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003680}
3681
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003682/*
3683 * This function will initialize new_rq and encrypt the content.
3684 * The first entry, new_rq[0], only contains a single iov which contains
3685 * a smb2_transform_hdr and is pre-allocated by the caller.
3686 * This function then populates new_rq[1+] with the content from olq_rq[0+].
3687 *
3688 * The end result is an array of smb_rqst structures where the first structure
3689 * only contains a single iov for the transform header which we then can pass
3690 * to crypt_message().
3691 *
3692 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3693 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3694 */
3695static int
3696smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3697 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003698{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003699 struct page **pages;
3700 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3701 unsigned int npages;
3702 unsigned int orig_len = 0;
3703 int i, j;
3704 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003705
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003706 for (i = 1; i < num_rqst; i++) {
3707 npages = old_rq[i - 1].rq_npages;
3708 pages = kmalloc_array(npages, sizeof(struct page *),
3709 GFP_KERNEL);
3710 if (!pages)
3711 goto err_free;
3712
3713 new_rq[i].rq_pages = pages;
3714 new_rq[i].rq_npages = npages;
3715 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3716 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3717 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3718 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3719 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3720
3721 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3722
3723 for (j = 0; j < npages; j++) {
3724 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3725 if (!pages[j])
3726 goto err_free;
3727 }
3728
3729 /* copy pages form the old */
3730 for (j = 0; j < npages; j++) {
3731 char *dst, *src;
3732 unsigned int offset, len;
3733
3734 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3735
3736 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3737 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3738
3739 memcpy(dst, src, len);
3740 kunmap(new_rq[i].rq_pages[j]);
3741 kunmap(old_rq[i - 1].rq_pages[j]);
3742 }
3743 }
3744
3745 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05003746 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003747
3748 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02003749 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003750 if (rc)
3751 goto err_free;
3752
3753 return rc;
3754
3755err_free:
3756 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3757 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003758}
3759
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003760static int
3761smb3_is_transform_hdr(void *buf)
3762{
3763 struct smb2_transform_hdr *trhdr = buf;
3764
3765 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3766}
3767
/*
 * Decrypt a received SMB3 message in place.  @buf holds the transform
 * header followed by @buf_data_size bytes of ciphertext; any additional
 * payload lives in @pages.  On success the plaintext SMB2 message is
 * moved to the start of @buf and server->total_read is updated to the
 * decrypted length.
 */
static int
decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
		 unsigned int buf_data_size, struct page **pages,
		 unsigned int npages, unsigned int page_data_size)
{
	struct kvec iov[2];
	struct smb_rqst rqst = {NULL};
	int rc;

	/* iov[0] = transform header (associated data), iov[1] = ciphertext */
	iov[0].iov_base = buf;
	iov[0].iov_len = sizeof(struct smb2_transform_hdr);
	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
	iov[1].iov_len = buf_data_size;

	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;
	rqst.rq_pages = pages;
	rqst.rq_npages = npages;
	rqst.rq_pagesz = PAGE_SIZE;
	rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;

	rc = crypt_message(server, 1, &rqst, 0);
	cifs_dbg(FYI, "Decrypt message returned %d\n", rc);

	if (rc)
		return rc;

	/* strip the transform header so the SMB2 header sits at buf[0] */
	memmove(buf, iov[1].iov_base, buf_data_size);

	server->total_read = buf_data_size + page_data_size;

	return rc;
}
3801
3802static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003803read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3804 unsigned int npages, unsigned int len)
3805{
3806 int i;
3807 int length;
3808
3809 for (i = 0; i < npages; i++) {
3810 struct page *page = pages[i];
3811 size_t n;
3812
3813 n = len;
3814 if (len >= PAGE_SIZE) {
3815 /* enough data to fill the page */
3816 n = PAGE_SIZE;
3817 len -= n;
3818 } else {
3819 zero_user(page, len, PAGE_SIZE - len);
3820 len = 0;
3821 }
Long Li1dbe3462018-05-30 12:47:55 -07003822 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003823 if (length < 0)
3824 return length;
3825 server->total_read += length;
3826 }
3827
3828 return 0;
3829}
3830
3831static int
3832init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3833 unsigned int cur_off, struct bio_vec **page_vec)
3834{
3835 struct bio_vec *bvec;
3836 int i;
3837
3838 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3839 if (!bvec)
3840 return -ENOMEM;
3841
3842 for (i = 0; i < npages; i++) {
3843 bvec[i].bv_page = pages[i];
3844 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3845 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3846 data_size -= bvec[i].bv_len;
3847 }
3848
3849 if (data_size != 0) {
3850 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3851 kfree(bvec);
3852 return -EIO;
3853 }
3854
3855 *page_vec = bvec;
3856 return 0;
3857}
3858
3859static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003860handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3861 char *buf, unsigned int buf_len, struct page **pages,
3862 unsigned int npages, unsigned int page_data_size)
3863{
3864 unsigned int data_offset;
3865 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003866 unsigned int cur_off;
3867 unsigned int cur_page_idx;
3868 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003869 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10003870 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003871 struct bio_vec *bvec = NULL;
3872 struct iov_iter iter;
3873 struct kvec iov;
3874 int length;
Long Li74dcf412017-11-22 17:38:46 -07003875 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003876
3877 if (shdr->Command != SMB2_READ) {
3878 cifs_dbg(VFS, "only big read responses are supported\n");
3879 return -ENOTSUPP;
3880 }
3881
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003882 if (server->ops->is_session_expired &&
3883 server->ops->is_session_expired(buf)) {
3884 cifs_reconnect(server);
3885 wake_up(&server->response_q);
3886 return -1;
3887 }
3888
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003889 if (server->ops->is_status_pending &&
Pavel Shilovsky66265f12019-01-23 17:11:16 -08003890 server->ops->is_status_pending(buf, server))
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003891 return -1;
3892
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003893 /* set up first two iov to get credits */
3894 rdata->iov[0].iov_base = buf;
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003895 rdata->iov[0].iov_len = 0;
3896 rdata->iov[1].iov_base = buf;
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003897 rdata->iov[1].iov_len =
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003898 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003899 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3900 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3901 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3902 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3903
3904 rdata->result = server->ops->map_error(buf, true);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003905 if (rdata->result != 0) {
3906 cifs_dbg(FYI, "%s: server returned error %d\n",
3907 __func__, rdata->result);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003908 /* normal error on read response */
3909 dequeue_mid(mid, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003910 return 0;
3911 }
3912
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003913 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07003914#ifdef CONFIG_CIFS_SMB_DIRECT
3915 use_rdma_mr = rdata->mr;
3916#endif
3917 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003918
3919 if (data_offset < server->vals->read_rsp_size) {
3920 /*
3921 * win2k8 sometimes sends an offset of 0 when the read
3922 * is beyond the EOF. Treat it as if the data starts just after
3923 * the header.
3924 */
3925 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
3926 __func__, data_offset);
3927 data_offset = server->vals->read_rsp_size;
3928 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
3929 /* data_offset is beyond the end of smallbuf */
3930 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
3931 __func__, data_offset);
3932 rdata->result = -EIO;
3933 dequeue_mid(mid, rdata->result);
3934 return 0;
3935 }
3936
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003937 pad_len = data_offset - server->vals->read_rsp_size;
3938
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003939 if (buf_len <= data_offset) {
3940 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003941 cur_page_idx = pad_len / PAGE_SIZE;
3942 cur_off = pad_len % PAGE_SIZE;
3943
3944 if (cur_page_idx != 0) {
3945 /* data offset is beyond the 1st page of response */
3946 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
3947 __func__, data_offset);
3948 rdata->result = -EIO;
3949 dequeue_mid(mid, rdata->result);
3950 return 0;
3951 }
3952
3953 if (data_len > page_data_size - pad_len) {
3954 /* data_len is corrupt -- discard frame */
3955 rdata->result = -EIO;
3956 dequeue_mid(mid, rdata->result);
3957 return 0;
3958 }
3959
3960 rdata->result = init_read_bvec(pages, npages, page_data_size,
3961 cur_off, &bvec);
3962 if (rdata->result != 0) {
3963 dequeue_mid(mid, rdata->result);
3964 return 0;
3965 }
3966
David Howellsaa563d72018-10-20 00:57:56 +01003967 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003968 } else if (buf_len >= data_offset + data_len) {
3969 /* read response payload is in buf */
3970 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
3971 iov.iov_base = buf + data_offset;
3972 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01003973 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003974 } else {
3975 /* read response payload cannot be in both buf and pages */
3976 WARN_ONCE(1, "buf can not contain only a part of read data");
3977 rdata->result = -EIO;
3978 dequeue_mid(mid, rdata->result);
3979 return 0;
3980 }
3981
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003982 length = rdata->copy_into_pages(server, rdata, &iter);
3983
3984 kfree(bvec);
3985
3986 if (length < 0)
3987 return length;
3988
3989 dequeue_mid(mid, false);
3990 return length;
3991}
3992
3993static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003994receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
3995{
3996 char *buf = server->smallbuf;
3997 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
3998 unsigned int npages;
3999 struct page **pages;
4000 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004001 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004002 int rc;
4003 int i = 0;
4004
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004005 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004006 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4007
4008 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4009 if (rc < 0)
4010 return rc;
4011 server->total_read += rc;
4012
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004013 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11004014 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004015 npages = DIV_ROUND_UP(len, PAGE_SIZE);
4016
4017 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
4018 if (!pages) {
4019 rc = -ENOMEM;
4020 goto discard_data;
4021 }
4022
4023 for (; i < npages; i++) {
4024 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4025 if (!pages[i]) {
4026 rc = -ENOMEM;
4027 goto discard_data;
4028 }
4029 }
4030
4031 /* read read data into pages */
4032 rc = read_data_into_pages(server, pages, npages, len);
4033 if (rc)
4034 goto free_pages;
4035
Pavel Shilovsky350be252017-04-10 10:31:33 -07004036 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004037 if (rc)
4038 goto free_pages;
4039
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004040 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004041 pages, npages, len);
4042 if (rc)
4043 goto free_pages;
4044
4045 *mid = smb2_find_mid(server, buf);
4046 if (*mid == NULL)
4047 cifs_dbg(FYI, "mid not found\n");
4048 else {
4049 cifs_dbg(FYI, "mid found\n");
4050 (*mid)->decrypted = true;
4051 rc = handle_read_data(server, *mid, buf,
4052 server->vals->read_rsp_size,
4053 pages, npages, len);
4054 }
4055
4056free_pages:
4057 for (i = i - 1; i >= 0; i--)
4058 put_page(pages[i]);
4059 kfree(pages);
4060 return rc;
4061discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07004062 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004063 goto free_pages;
4064}
4065
4066static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004067receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004068 struct mid_q_entry **mids, char **bufs,
4069 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004070{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004071 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004072 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004073 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004074 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004075 unsigned int buf_size;
4076 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004077 int next_is_large;
4078 char *next_buffer = NULL;
4079
4080 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004081
4082 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004083 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004084 server->large_buf = true;
4085 memcpy(server->bigbuf, buf, server->total_read);
4086 buf = server->bigbuf;
4087 }
4088
4089 /* now read the rest */
4090 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004091 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004092 if (length < 0)
4093 return length;
4094 server->total_read += length;
4095
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004096 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004097 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
4098 if (length)
4099 return length;
4100
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004101 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004102one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004103 shdr = (struct smb2_sync_hdr *)buf;
4104 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004105 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004106 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004107 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004108 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004109 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004110 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004111 pdu_length - le32_to_cpu(shdr->NextCommand));
4112 }
4113
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004114 mid_entry = smb2_find_mid(server, buf);
4115 if (mid_entry == NULL)
4116 cifs_dbg(FYI, "mid not found\n");
4117 else {
4118 cifs_dbg(FYI, "mid found\n");
4119 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004120 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004121 }
4122
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004123 if (*num_mids >= MAX_COMPOUND) {
4124 cifs_dbg(VFS, "too many PDUs in compound\n");
4125 return -1;
4126 }
4127 bufs[*num_mids] = buf;
4128 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004129
4130 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004131 ret = mid_entry->handle(server, mid_entry);
4132 else
4133 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004134
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004135 if (ret == 0 && shdr->NextCommand) {
4136 pdu_length -= le32_to_cpu(shdr->NextCommand);
4137 server->large_buf = next_is_large;
4138 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004139 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004140 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004141 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004142 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004143 } else if (ret != 0) {
4144 /*
4145 * ret != 0 here means that we didn't get to handle_mid() thus
4146 * server->smallbuf and server->bigbuf are still valid. We need
4147 * to free next_buffer because it is not going to be used
4148 * anywhere.
4149 */
4150 if (next_is_large)
4151 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4152 else
4153 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004154 }
4155
4156 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004157}
4158
4159static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004160smb3_receive_transform(struct TCP_Server_Info *server,
4161 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004162{
4163 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004164 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004165 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4166 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4167
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004168 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004169 sizeof(struct smb2_sync_hdr)) {
4170 cifs_dbg(VFS, "Transform message is too small (%u)\n",
4171 pdu_length);
4172 cifs_reconnect(server);
4173 wake_up(&server->response_q);
4174 return -ECONNABORTED;
4175 }
4176
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004177 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004178 cifs_dbg(VFS, "Transform message is broken\n");
4179 cifs_reconnect(server);
4180 wake_up(&server->response_q);
4181 return -ECONNABORTED;
4182 }
4183
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004184 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004185 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
4186 *num_mids = 1;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004187 return receive_encrypted_read(server, &mids[0]);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004188 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004189
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004190 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004191}
4192
4193int
4194smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4195{
4196 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4197
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004198 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004199 NULL, 0, 0);
4200}
4201
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004202static int
4203smb2_next_header(char *buf)
4204{
4205 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4206 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4207
4208 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4209 return sizeof(struct smb2_transform_hdr) +
4210 le32_to_cpu(t_hdr->OriginalMessageSize);
4211
4212 return le32_to_cpu(hdr->NextCommand);
4213}
4214
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004215static int
4216smb2_make_node(unsigned int xid, struct inode *inode,
4217 struct dentry *dentry, struct cifs_tcon *tcon,
4218 char *full_path, umode_t mode, dev_t dev)
4219{
4220 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4221 int rc = -EPERM;
4222 int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
4223 FILE_ALL_INFO *buf = NULL;
4224 struct cifs_io_parms io_parms;
4225 __u32 oplock = 0;
4226 struct cifs_fid fid;
4227 struct cifs_open_parms oparms;
4228 unsigned int bytes_written;
4229 struct win_dev *pdev;
4230 struct kvec iov[2];
4231
4232 /*
4233 * Check if mounted with mount parm 'sfu' mount parm.
4234 * SFU emulation should work with all servers, but only
4235 * supports block and char device (no socket & fifo),
4236 * and was used by default in earlier versions of Windows
4237 */
4238 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
4239 goto out;
4240
4241 /*
4242 * TODO: Add ability to create instead via reparse point. Windows (e.g.
4243 * their current NFS server) uses this approach to expose special files
4244 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
4245 */
4246
4247 if (!S_ISCHR(mode) && !S_ISBLK(mode))
4248 goto out;
4249
4250 cifs_dbg(FYI, "sfu compat create special file\n");
4251
4252 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
4253 if (buf == NULL) {
4254 rc = -ENOMEM;
4255 goto out;
4256 }
4257
4258 if (backup_cred(cifs_sb))
4259 create_options |= CREATE_OPEN_BACKUP_INTENT;
4260
4261 oparms.tcon = tcon;
4262 oparms.cifs_sb = cifs_sb;
4263 oparms.desired_access = GENERIC_WRITE;
4264 oparms.create_options = create_options;
4265 oparms.disposition = FILE_CREATE;
4266 oparms.path = full_path;
4267 oparms.fid = &fid;
4268 oparms.reconnect = false;
4269
4270 if (tcon->ses->server->oplocks)
4271 oplock = REQ_OPLOCK;
4272 else
4273 oplock = 0;
4274 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
4275 if (rc)
4276 goto out;
4277
4278 /*
4279 * BB Do not bother to decode buf since no local inode yet to put
4280 * timestamps in, but we can reuse it safely.
4281 */
4282
4283 pdev = (struct win_dev *)buf;
4284 io_parms.pid = current->tgid;
4285 io_parms.tcon = tcon;
4286 io_parms.offset = 0;
4287 io_parms.length = sizeof(struct win_dev);
4288 iov[1].iov_base = buf;
4289 iov[1].iov_len = sizeof(struct win_dev);
4290 if (S_ISCHR(mode)) {
4291 memcpy(pdev->type, "IntxCHR", 8);
4292 pdev->major = cpu_to_le64(MAJOR(dev));
4293 pdev->minor = cpu_to_le64(MINOR(dev));
4294 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4295 &bytes_written, iov, 1);
4296 } else if (S_ISBLK(mode)) {
4297 memcpy(pdev->type, "IntxBLK", 8);
4298 pdev->major = cpu_to_le64(MAJOR(dev));
4299 pdev->minor = cpu_to_le64(MINOR(dev));
4300 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4301 &bytes_written, iov, 1);
4302 }
4303 tcon->ses->server->ops->close(xid, tcon, &fid);
4304 d_drop(dentry);
4305
4306 /* FIXME: add code here to set EAs */
4307out:
4308 kfree(buf);
4309 return rc;
4310}
4311
4312
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004313struct smb_version_operations smb20_operations = {
4314 .compare_fids = smb2_compare_fids,
4315 .setup_request = smb2_setup_request,
4316 .setup_async_request = smb2_setup_async_request,
4317 .check_receive = smb2_check_receive,
4318 .add_credits = smb2_add_credits,
4319 .set_credits = smb2_set_credits,
4320 .get_credits_field = smb2_get_credits_field,
4321 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004322 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004323 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004324 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004325 .read_data_offset = smb2_read_data_offset,
4326 .read_data_length = smb2_read_data_length,
4327 .map_error = map_smb2_to_linux_error,
4328 .find_mid = smb2_find_mid,
4329 .check_message = smb2_check_message,
4330 .dump_detail = smb2_dump_detail,
4331 .clear_stats = smb2_clear_stats,
4332 .print_stats = smb2_print_stats,
4333 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004334 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004335 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004336 .need_neg = smb2_need_neg,
4337 .negotiate = smb2_negotiate,
4338 .negotiate_wsize = smb2_negotiate_wsize,
4339 .negotiate_rsize = smb2_negotiate_rsize,
4340 .sess_setup = SMB2_sess_setup,
4341 .logoff = SMB2_logoff,
4342 .tree_connect = SMB2_tcon,
4343 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004344 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004345 .is_path_accessible = smb2_is_path_accessible,
4346 .can_echo = smb2_can_echo,
4347 .echo = SMB2_echo,
4348 .query_path_info = smb2_query_path_info,
4349 .get_srv_inum = smb2_get_srv_inum,
4350 .query_file_info = smb2_query_file_info,
4351 .set_path_size = smb2_set_path_size,
4352 .set_file_size = smb2_set_file_size,
4353 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004354 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004355 .mkdir = smb2_mkdir,
4356 .mkdir_setinfo = smb2_mkdir_setinfo,
4357 .rmdir = smb2_rmdir,
4358 .unlink = smb2_unlink,
4359 .rename = smb2_rename_path,
4360 .create_hardlink = smb2_create_hardlink,
4361 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01004362 .query_mf_symlink = smb3_query_mf_symlink,
4363 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004364 .open = smb2_open_file,
4365 .set_fid = smb2_set_fid,
4366 .close = smb2_close_file,
4367 .flush = smb2_flush_file,
4368 .async_readv = smb2_async_readv,
4369 .async_writev = smb2_async_writev,
4370 .sync_read = smb2_sync_read,
4371 .sync_write = smb2_sync_write,
4372 .query_dir_first = smb2_query_dir_first,
4373 .query_dir_next = smb2_query_dir_next,
4374 .close_dir = smb2_close_dir,
4375 .calc_smb_size = smb2_calc_size,
4376 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004377 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004378 .oplock_response = smb2_oplock_response,
4379 .queryfs = smb2_queryfs,
4380 .mand_lock = smb2_mand_lock,
4381 .mand_unlock_range = smb2_unlock_range,
4382 .push_mand_locks = smb2_push_mandatory_locks,
4383 .get_lease_key = smb2_get_lease_key,
4384 .set_lease_key = smb2_set_lease_key,
4385 .new_lease_key = smb2_new_lease_key,
4386 .calc_signature = smb2_calc_signature,
4387 .is_read_op = smb2_is_read_op,
4388 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004389 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004390 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004391 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004392 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004393 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004394 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304395 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004396#ifdef CONFIG_CIFS_XATTR
4397 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004398 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004399#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004400 .get_acl = get_smb2_acl,
4401 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004402 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004403 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004404 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004405 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004406 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004407 .llseek = smb3_llseek,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004408};
4409
Steve French1080ef72011-02-24 18:07:19 +00004410struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004411 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004412 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04004413 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004414 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04004415 .add_credits = smb2_add_credits,
4416 .set_credits = smb2_set_credits,
4417 .get_credits_field = smb2_get_credits_field,
4418 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004419 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004420 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004421 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004422 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004423 .read_data_offset = smb2_read_data_offset,
4424 .read_data_length = smb2_read_data_length,
4425 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004426 .find_mid = smb2_find_mid,
4427 .check_message = smb2_check_message,
4428 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04004429 .clear_stats = smb2_clear_stats,
4430 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004431 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004432 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004433 .downgrade_oplock = smb21_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04004434 .need_neg = smb2_need_neg,
4435 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07004436 .negotiate_wsize = smb2_negotiate_wsize,
4437 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04004438 .sess_setup = SMB2_sess_setup,
4439 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04004440 .tree_connect = SMB2_tcon,
4441 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004442 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04004443 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04004444 .can_echo = smb2_can_echo,
4445 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04004446 .query_path_info = smb2_query_path_info,
4447 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07004448 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004449 .set_path_size = smb2_set_path_size,
4450 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004451 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004452 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04004453 .mkdir = smb2_mkdir,
4454 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04004455 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07004456 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004457 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07004458 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004459 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004460 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004461 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07004462 .open = smb2_open_file,
4463 .set_fid = smb2_set_fid,
4464 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07004465 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004466 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07004467 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004468 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004469 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004470 .query_dir_first = smb2_query_dir_first,
4471 .query_dir_next = smb2_query_dir_next,
4472 .close_dir = smb2_close_dir,
4473 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07004474 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004475 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004476 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07004477 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07004478 .mand_lock = smb2_mand_lock,
4479 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07004480 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07004481 .get_lease_key = smb2_get_lease_key,
4482 .set_lease_key = smb2_set_lease_key,
4483 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06004484 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004485 .is_read_op = smb21_is_read_op,
4486 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004487 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004488 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004489 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004490 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004491 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05004492 .enum_snapshots = smb3_enum_snapshots,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004493 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304494 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004495#ifdef CONFIG_CIFS_XATTR
4496 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004497 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004498#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004499 .get_acl = get_smb2_acl,
4500 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004501 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004502 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004503 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004504 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004505 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004506 .llseek = smb3_llseek,
Steve French38107d42012-12-08 22:08:06 -06004507};
4508
Steve French38107d42012-12-08 22:08:06 -06004509struct smb_version_operations smb30_operations = {
4510 .compare_fids = smb2_compare_fids,
4511 .setup_request = smb2_setup_request,
4512 .setup_async_request = smb2_setup_async_request,
4513 .check_receive = smb2_check_receive,
4514 .add_credits = smb2_add_credits,
4515 .set_credits = smb2_set_credits,
4516 .get_credits_field = smb2_get_credits_field,
4517 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004518 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004519 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06004520 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004521 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06004522 .read_data_offset = smb2_read_data_offset,
4523 .read_data_length = smb2_read_data_length,
4524 .map_error = map_smb2_to_linux_error,
4525 .find_mid = smb2_find_mid,
4526 .check_message = smb2_check_message,
4527 .dump_detail = smb2_dump_detail,
4528 .clear_stats = smb2_clear_stats,
4529 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05004530 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06004531 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004532 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004533 .downgrade_oplock = smb21_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06004534 .need_neg = smb2_need_neg,
4535 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004536 .negotiate_wsize = smb3_negotiate_wsize,
4537 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06004538 .sess_setup = SMB2_sess_setup,
4539 .logoff = SMB2_logoff,
4540 .tree_connect = SMB2_tcon,
4541 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05004542 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06004543 .is_path_accessible = smb2_is_path_accessible,
4544 .can_echo = smb2_can_echo,
4545 .echo = SMB2_echo,
4546 .query_path_info = smb2_query_path_info,
4547 .get_srv_inum = smb2_get_srv_inum,
4548 .query_file_info = smb2_query_file_info,
4549 .set_path_size = smb2_set_path_size,
4550 .set_file_size = smb2_set_file_size,
4551 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004552 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06004553 .mkdir = smb2_mkdir,
4554 .mkdir_setinfo = smb2_mkdir_setinfo,
4555 .rmdir = smb2_rmdir,
4556 .unlink = smb2_unlink,
4557 .rename = smb2_rename_path,
4558 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004559 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004560 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004561 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06004562 .open = smb2_open_file,
4563 .set_fid = smb2_set_fid,
4564 .close = smb2_close_file,
4565 .flush = smb2_flush_file,
4566 .async_readv = smb2_async_readv,
4567 .async_writev = smb2_async_writev,
4568 .sync_read = smb2_sync_read,
4569 .sync_write = smb2_sync_write,
4570 .query_dir_first = smb2_query_dir_first,
4571 .query_dir_next = smb2_query_dir_next,
4572 .close_dir = smb2_close_dir,
4573 .calc_smb_size = smb2_calc_size,
4574 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004575 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06004576 .oplock_response = smb2_oplock_response,
4577 .queryfs = smb2_queryfs,
4578 .mand_lock = smb2_mand_lock,
4579 .mand_unlock_range = smb2_unlock_range,
4580 .push_mand_locks = smb2_push_mandatory_locks,
4581 .get_lease_key = smb2_get_lease_key,
4582 .set_lease_key = smb2_set_lease_key,
4583 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004584 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06004585 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004586 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004587 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004588 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004589 .create_lease_buf = smb3_create_lease_buf,
4590 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004591 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05004592 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06004593 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004594 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004595 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05004596 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004597 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004598 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004599 .is_transform_hdr = smb3_is_transform_hdr,
4600 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004601 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304602 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004603#ifdef CONFIG_CIFS_XATTR
4604 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004605 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004606#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004607 .get_acl = get_smb2_acl,
4608 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004609 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004610 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004611 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004612 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004613 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004614 .llseek = smb3_llseek,
Steve French1080ef72011-02-24 18:07:19 +00004615};
4616
Steve Frenchaab18932015-06-23 23:37:11 -05004617struct smb_version_operations smb311_operations = {
4618 .compare_fids = smb2_compare_fids,
4619 .setup_request = smb2_setup_request,
4620 .setup_async_request = smb2_setup_async_request,
4621 .check_receive = smb2_check_receive,
4622 .add_credits = smb2_add_credits,
4623 .set_credits = smb2_set_credits,
4624 .get_credits_field = smb2_get_credits_field,
4625 .get_credits = smb2_get_credits,
4626 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004627 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05004628 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004629 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05004630 .read_data_offset = smb2_read_data_offset,
4631 .read_data_length = smb2_read_data_length,
4632 .map_error = map_smb2_to_linux_error,
4633 .find_mid = smb2_find_mid,
4634 .check_message = smb2_check_message,
4635 .dump_detail = smb2_dump_detail,
4636 .clear_stats = smb2_clear_stats,
4637 .print_stats = smb2_print_stats,
4638 .dump_share_caps = smb2_dump_share_caps,
4639 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004640 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004641 .downgrade_oplock = smb21_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05004642 .need_neg = smb2_need_neg,
4643 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004644 .negotiate_wsize = smb3_negotiate_wsize,
4645 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05004646 .sess_setup = SMB2_sess_setup,
4647 .logoff = SMB2_logoff,
4648 .tree_connect = SMB2_tcon,
4649 .tree_disconnect = SMB2_tdis,
4650 .qfs_tcon = smb3_qfs_tcon,
4651 .is_path_accessible = smb2_is_path_accessible,
4652 .can_echo = smb2_can_echo,
4653 .echo = SMB2_echo,
4654 .query_path_info = smb2_query_path_info,
4655 .get_srv_inum = smb2_get_srv_inum,
4656 .query_file_info = smb2_query_file_info,
4657 .set_path_size = smb2_set_path_size,
4658 .set_file_size = smb2_set_file_size,
4659 .set_file_info = smb2_set_file_info,
4660 .set_compression = smb2_set_compression,
4661 .mkdir = smb2_mkdir,
4662 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05004663 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05004664 .rmdir = smb2_rmdir,
4665 .unlink = smb2_unlink,
4666 .rename = smb2_rename_path,
4667 .create_hardlink = smb2_create_hardlink,
4668 .query_symlink = smb2_query_symlink,
4669 .query_mf_symlink = smb3_query_mf_symlink,
4670 .create_mf_symlink = smb3_create_mf_symlink,
4671 .open = smb2_open_file,
4672 .set_fid = smb2_set_fid,
4673 .close = smb2_close_file,
4674 .flush = smb2_flush_file,
4675 .async_readv = smb2_async_readv,
4676 .async_writev = smb2_async_writev,
4677 .sync_read = smb2_sync_read,
4678 .sync_write = smb2_sync_write,
4679 .query_dir_first = smb2_query_dir_first,
4680 .query_dir_next = smb2_query_dir_next,
4681 .close_dir = smb2_close_dir,
4682 .calc_smb_size = smb2_calc_size,
4683 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004684 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05004685 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05004686 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05004687 .mand_lock = smb2_mand_lock,
4688 .mand_unlock_range = smb2_unlock_range,
4689 .push_mand_locks = smb2_push_mandatory_locks,
4690 .get_lease_key = smb2_get_lease_key,
4691 .set_lease_key = smb2_set_lease_key,
4692 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004693 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05004694 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004695 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05004696 .is_read_op = smb21_is_read_op,
4697 .set_oplock_level = smb3_set_oplock_level,
4698 .create_lease_buf = smb3_create_lease_buf,
4699 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004700 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07004701 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05004702/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
4703 .wp_retry_size = smb2_wp_retry_size,
4704 .dir_needs_close = smb2_dir_needs_close,
4705 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004706 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004707 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004708 .is_transform_hdr = smb3_is_transform_hdr,
4709 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004710 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304711 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004712#ifdef CONFIG_CIFS_XATTR
4713 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004714 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004715#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10004716 .get_acl = get_smb2_acl,
4717 .get_acl_by_fid = get_smb2_acl_by_fid,
4718 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004719 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004720 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004721 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004722 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004723 .llseek = smb3_llseek,
Steve Frenchaab18932015-06-23 23:37:11 -05004724};
Steve Frenchaab18932015-06-23 23:37:11 -05004725
Steve Frenchdd446b12012-11-28 23:21:06 -06004726struct smb_version_values smb20_values = {
4727 .version_string = SMB20_VERSION_STRING,
4728 .protocol_id = SMB20_PROT_ID,
4729 .req_capabilities = 0, /* MBZ */
4730 .large_lock_type = 0,
4731 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4732 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4733 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004734 .header_size = sizeof(struct smb2_sync_hdr),
4735 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06004736 .max_header_size = MAX_SMB2_HDR_SIZE,
4737 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4738 .lock_cmd = SMB2_LOCK,
4739 .cap_unix = 0,
4740 .cap_nt_find = SMB2_NT_FIND,
4741 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004742 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4743 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004744 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06004745};
4746
Steve French1080ef72011-02-24 18:07:19 +00004747struct smb_version_values smb21_values = {
4748 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004749 .protocol_id = SMB21_PROT_ID,
4750 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
4751 .large_lock_type = 0,
4752 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4753 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4754 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004755 .header_size = sizeof(struct smb2_sync_hdr),
4756 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004757 .max_header_size = MAX_SMB2_HDR_SIZE,
4758 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4759 .lock_cmd = SMB2_LOCK,
4760 .cap_unix = 0,
4761 .cap_nt_find = SMB2_NT_FIND,
4762 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004763 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4764 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004765 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05004766};
4767
Steve French9764c022017-09-17 10:41:35 -05004768struct smb_version_values smb3any_values = {
4769 .version_string = SMB3ANY_VERSION_STRING,
4770 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004771 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004772 .large_lock_type = 0,
4773 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4774 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4775 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004776 .header_size = sizeof(struct smb2_sync_hdr),
4777 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004778 .max_header_size = MAX_SMB2_HDR_SIZE,
4779 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4780 .lock_cmd = SMB2_LOCK,
4781 .cap_unix = 0,
4782 .cap_nt_find = SMB2_NT_FIND,
4783 .cap_large_files = SMB2_LARGE_FILES,
4784 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4785 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4786 .create_lease_size = sizeof(struct create_lease_v2),
4787};
4788
4789struct smb_version_values smbdefault_values = {
4790 .version_string = SMBDEFAULT_VERSION_STRING,
4791 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004792 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004793 .large_lock_type = 0,
4794 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4795 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4796 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004797 .header_size = sizeof(struct smb2_sync_hdr),
4798 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004799 .max_header_size = MAX_SMB2_HDR_SIZE,
4800 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4801 .lock_cmd = SMB2_LOCK,
4802 .cap_unix = 0,
4803 .cap_nt_find = SMB2_NT_FIND,
4804 .cap_large_files = SMB2_LARGE_FILES,
4805 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4806 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4807 .create_lease_size = sizeof(struct create_lease_v2),
4808};
4809
Steve Frenche4aa25e2012-10-01 12:26:22 -05004810struct smb_version_values smb30_values = {
4811 .version_string = SMB30_VERSION_STRING,
4812 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004813 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004814 .large_lock_type = 0,
4815 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4816 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4817 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004818 .header_size = sizeof(struct smb2_sync_hdr),
4819 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004820 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004821 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004822 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004823 .cap_unix = 0,
4824 .cap_nt_find = SMB2_NT_FIND,
4825 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004826 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4827 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004828 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00004829};
Steve French20b6d8b2013-06-12 22:48:41 -05004830
4831struct smb_version_values smb302_values = {
4832 .version_string = SMB302_VERSION_STRING,
4833 .protocol_id = SMB302_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004834 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French20b6d8b2013-06-12 22:48:41 -05004835 .large_lock_type = 0,
4836 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4837 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4838 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004839 .header_size = sizeof(struct smb2_sync_hdr),
4840 .header_preamble_size = 0,
Steve French20b6d8b2013-06-12 22:48:41 -05004841 .max_header_size = MAX_SMB2_HDR_SIZE,
4842 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4843 .lock_cmd = SMB2_LOCK,
4844 .cap_unix = 0,
4845 .cap_nt_find = SMB2_NT_FIND,
4846 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004847 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4848 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004849 .create_lease_size = sizeof(struct create_lease_v2),
Steve French20b6d8b2013-06-12 22:48:41 -05004850};
Steve French5f7fbf72014-12-17 22:52:58 -06004851
Steve French5f7fbf72014-12-17 22:52:58 -06004852struct smb_version_values smb311_values = {
4853 .version_string = SMB311_VERSION_STRING,
4854 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004855 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06004856 .large_lock_type = 0,
4857 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4858 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4859 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004860 .header_size = sizeof(struct smb2_sync_hdr),
4861 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06004862 .max_header_size = MAX_SMB2_HDR_SIZE,
4863 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4864 .lock_cmd = SMB2_LOCK,
4865 .cap_unix = 0,
4866 .cap_nt_find = SMB2_NT_FIND,
4867 .cap_large_files = SMB2_LARGE_FILES,
4868 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4869 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4870 .create_lease_size = sizeof(struct create_lease_v2),
4871};