blob: c742844849472bcab9f114028d840e48495f9dd3 [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070013#include <crypto/aead.h>
Steve French1080ef72011-02-24 18:07:19 +000014#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040015#include "smb2pdu.h"
16#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040017#include "cifsproto.h"
18#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040019#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070020#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070021#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050022#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070023#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040024
/* Change credits for different ops and return the total number of credits */
static int
change_conf(struct TCP_Server_Info *server)
{
	/*
	 * Must be called with server->req_lock held (see smb2_add_credits).
	 *
	 * Pull the credits parked for echo and oplock-break requests back
	 * into the common pool, then re-split the pool:
	 *   0 credits  - nothing can be sent; return 0 so the caller warns
	 *   1 credit   - no echoes, no oplocks; everything for regular ops
	 *   2 credits  - echoes enabled, one credit reserved for them
	 *   3+ credits - echoes enabled, plus one oplock credit if oplocks
	 *                are enabled module-wide (enable_oplocks)
	 *
	 * Returns the total credits (regular + echo + oplock), or 0 when
	 * the server has granted none.
	 */
	server->credits += server->echo_credits + server->oplock_credits;
	server->oplock_credits = server->echo_credits = 0;
	switch (server->credits) {
	case 0:
		return 0;
	case 1:
		server->echoes = false;
		server->oplocks = false;
		break;
	case 2:
		server->echoes = true;
		server->oplocks = false;
		server->echo_credits = 1;
		break;
	default:
		server->echoes = true;
		if (enable_oplocks) {
			server->oplocks = true;
			server->oplock_credits = 1;
		} else
			server->oplocks = false;

		server->echo_credits = 1;
	}
	server->credits -= server->echo_credits + server->oplock_credits;
	return server->credits + server->echo_credits + server->oplock_credits;
}
56
/*
 * Return credits granted by a response to the pool they belong to
 * (regular/echo/oplock, selected by @optype), rebalance the split when
 * the connection goes idle, and wake up waiters on the request queue.
 */
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;	/* -1 means change_conf() was not run */
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	/*
	 * Only add the credits back if they were granted by the current
	 * server instance (or the caller recorded no instance at all);
	 * credits from before a reconnect must not leak into the new
	 * session's pool.
	 */
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	/* logging is done after dropping req_lock */
	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			 add, instance);

	/* no point reporting credit state on a dying connection */
	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}
124
125static void
126smb2_set_credits(struct TCP_Server_Info *server, const int val)
127{
128 spin_lock(&server->req_lock);
129 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500130 if (val == 1)
131 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400132 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500133 /* don't log while holding the lock */
134 if (val == 1)
135 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400136}
137
138static int *
139smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
140{
141 switch (optype) {
142 case CIFS_ECHO_OP:
143 return &server->echo_credits;
144 case CIFS_OBREAK_OP:
145 return &server->oplock_credits;
146 default:
147 return &server->credits;
148 }
149}
150
151static unsigned int
152smb2_get_credits(struct mid_q_entry *mid)
153{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000154 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700155
Pavel Shilovsky3d3003f2019-01-22 16:50:21 -0800156 if (mid->mid_state == MID_RESPONSE_RECEIVED
157 || mid->mid_state == MID_RESPONSE_MALFORMED)
158 return le16_to_cpu(shdr->CreditRequest);
159
160 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400161}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400162
/*
 * Reserve enough credits for a large (MTU) read/write of up to @size
 * bytes, sleeping until the server has granted some.  On return *num
 * holds the byte count the caller may use and *credits records what was
 * charged (value 0 when the pool was too low to reserve anything, in
 * which case the caller proceeds with a single-credit sized request).
 * Returns 0, a -ERESTARTSYS-style value if the wait was killed, or
 * -ENOENT if the connection is being torn down.
 */
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			/* nothing granted yet: drop the lock and sleep */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			/* re-take the lock and re-check the pool */
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				/*
				 * Too few credits to carve out an MTU
				 * reservation; charge nothing (value = 0)
				 * and let the caller send a regular-sized
				 * request instead.
				 */
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}
212
/*
 * Shrink a request's reserved MTU credits down to what @payload_size
 * actually needs and return the surplus to the server's pool.  Fails
 * with -ENOTSUPP (WARN_ONCE) if fewer credits were reserved than the
 * payload requires, and with -EAGAIN if the reservation belongs to a
 * previous server instance (reconnect happened in between).
 */
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

	/* nothing reserved, or already exactly right - nothing to do */
	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		WARN_ONCE(1, "request has less credits (%d) than required (%d)",
			  credits->value, new_val);
		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		spin_unlock(&server->req_lock);
		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
			 credits->value - new_val);
		return -EAGAIN;
	}

	/* give the surplus back and wake anyone waiting for credits */
	server->credits += credits->value - new_val;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	credits->value = new_val;
	return 0;
}
244
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400245static __u64
246smb2_get_next_mid(struct TCP_Server_Info *server)
247{
248 __u64 mid;
249 /* for SMB2 we need the current value */
250 spin_lock(&GlobalMid_Lock);
251 mid = server->CurrentMid++;
252 spin_unlock(&GlobalMid_Lock);
253 return mid;
254}
Steve French1080ef72011-02-24 18:07:19 +0000255
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800256static void
257smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
258{
259 spin_lock(&GlobalMid_Lock);
260 if (server->CurrentMid >= val)
261 server->CurrentMid -= val;
262 spin_unlock(&GlobalMid_Lock);
263}
264
/*
 * Look up the pending mid matching the response in @buf (by message id,
 * command, and submitted state).  On success returns the mid with an
 * extra reference taken; the caller is responsible for dropping it.
 * Returns NULL if no match, or if the frame is encrypted (transform
 * header), which this path cannot parse.
 */
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			/* pin the mid before releasing the list lock */
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}
290
/*
 * Debug helper: dump the key sync-header fields and computed length of
 * the SMB2 frame in @buf.  Compiled out unless CONFIG_CIFS_DEBUG2.
 */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}
304
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400305static bool
306smb2_need_neg(struct TCP_Server_Info *server)
307{
308 return server->max_read == 0;
309}
310
311static int
312smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
313{
314 int rc;
Christoph Probsta205d502019-05-08 21:36:25 +0200315
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400316 ses->server->CurrentMid = 0;
317 rc = SMB2_negotiate(xid, ses);
318 /* BB we probably don't need to retry with modern servers */
319 if (rc == -EAGAIN)
320 rc = -EHOSTDOWN;
321 return rc;
322}
323
/*
 * Compute the write size for an SMB2 mount: the user-requested (or
 * default) wsize clamped by the server's max_write, the RDMA transport
 * limits when SMB Direct is in use, and a single-credit buffer when the
 * server does not support large MTU.
 */
static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use direct RDMA read/write */
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
348
/*
 * SMB3 variant of the write-size negotiation: identical clamping to
 * smb2_negotiate_wsize() but starting from the larger SMB3 default
 * I/O size.
 */
static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use direct RDMA read/write */
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
373
/*
 * Compute the read size for an SMB2 mount: the user-requested (or
 * default) rsize clamped by the server's max_read, the RDMA transport
 * limits when SMB Direct is in use, and a single-credit buffer when the
 * server does not support large MTU.
 */
static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use direct RDMA read/write */
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
399
/*
 * SMB3 variant of the read-size negotiation: identical clamping to
 * smb2_negotiate_rsize() but starting from the larger SMB3 default
 * I/O size.
 */
static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use direct RDMA read/write */
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200425
426static int
427parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
428 size_t buf_len,
429 struct cifs_server_iface **iface_list,
430 size_t *iface_count)
431{
432 struct network_interface_info_ioctl_rsp *p;
433 struct sockaddr_in *addr4;
434 struct sockaddr_in6 *addr6;
435 struct iface_info_ipv4 *p4;
436 struct iface_info_ipv6 *p6;
437 struct cifs_server_iface *info;
438 ssize_t bytes_left;
439 size_t next = 0;
440 int nb_iface = 0;
441 int rc = 0;
442
443 *iface_list = NULL;
444 *iface_count = 0;
445
446 /*
447 * Fist pass: count and sanity check
448 */
449
450 bytes_left = buf_len;
451 p = buf;
452 while (bytes_left >= sizeof(*p)) {
453 nb_iface++;
454 next = le32_to_cpu(p->Next);
455 if (!next) {
456 bytes_left -= sizeof(*p);
457 break;
458 }
459 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
460 bytes_left -= next;
461 }
462
463 if (!nb_iface) {
464 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
465 rc = -EINVAL;
466 goto out;
467 }
468
469 if (bytes_left || p->Next)
470 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
471
472
473 /*
474 * Second pass: extract info to internal structure
475 */
476
477 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
478 if (!*iface_list) {
479 rc = -ENOMEM;
480 goto out;
481 }
482
483 info = *iface_list;
484 bytes_left = buf_len;
485 p = buf;
486 while (bytes_left >= sizeof(*p)) {
487 info->speed = le64_to_cpu(p->LinkSpeed);
488 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
489 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
490
491 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
492 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
493 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
494 le32_to_cpu(p->Capability));
495
496 switch (p->Family) {
497 /*
498 * The kernel and wire socket structures have the same
499 * layout and use network byte order but make the
500 * conversion explicit in case either one changes.
501 */
502 case INTERNETWORK:
503 addr4 = (struct sockaddr_in *)&info->sockaddr;
504 p4 = (struct iface_info_ipv4 *)p->Buffer;
505 addr4->sin_family = AF_INET;
506 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
507
508 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
509 addr4->sin_port = cpu_to_be16(CIFS_PORT);
510
511 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
512 &addr4->sin_addr);
513 break;
514 case INTERNETWORKV6:
515 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
516 p6 = (struct iface_info_ipv6 *)p->Buffer;
517 addr6->sin6_family = AF_INET6;
518 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
519
520 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
521 addr6->sin6_flowinfo = 0;
522 addr6->sin6_scope_id = 0;
523 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
524
525 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
526 &addr6->sin6_addr);
527 break;
528 default:
529 cifs_dbg(VFS,
530 "%s: skipping unsupported socket family\n",
531 __func__);
532 goto next_iface;
533 }
534
535 (*iface_count)++;
536 info++;
537next_iface:
538 next = le32_to_cpu(p->Next);
539 if (!next)
540 break;
541 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
542 bytes_left -= next;
543 }
544
545 if (!*iface_count) {
546 rc = -EINVAL;
547 goto out;
548 }
549
550out:
551 if (rc) {
552 kfree(*iface_list);
553 *iface_count = 0;
554 *iface_list = NULL;
555 }
556 return rc;
557}
558
559
/*
 * Query the server's network interface list via
 * FSCTL_QUERY_NETWORK_INTERFACE_INFO and install the parsed result into
 * the session (replacing any previous list) under ses->iface_lock.
 * A server that does not support the ioctl is logged at FYI level and
 * not treated as an error path beyond returning the rc.
 */
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	/* swap in the new list; ownership moves to the session */
	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500599
/*
 * kref release callback for the cached root handle: close the handle on
 * the server and invalidate the cached state.  Runs with the owning
 * cfid->fid_mutex held by the caller of kref_put() (see close_shroot).
 */
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
	}
}
614
/*
 * Drop one reference on the cached root handle; the last reference
 * closes it via smb2_close_cached_fid().  The fid_mutex is held across
 * the kref_put so the release callback runs under the lock.
 */
void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}
621
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000622void
623smb2_cached_lease_break(struct work_struct *work)
624{
625 struct cached_fid *cfid = container_of(work,
626 struct cached_fid, lease_break);
627
628 close_shroot(cfid);
629}
630
/*
 * Open the directory at the root of a share
 *
 * Returns 0 and fills *pfid with a referenced cached root handle, using
 * an existing cached handle when one is valid, otherwise opening one
 * with a compounded create + query-info request.  Callers release the
 * reference with close_shroot().
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	/* fast path: hand out the already-cached handle */
	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = 0;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* compounded query of FILE_ALL_INFORMATION on the new handle */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_exit;

	smb2_set_related(&rqst[1]);

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);
	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc)
		goto oshr_exit;

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	/* publish the new handle as the tcon's cached root */
	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/* extra ref held on behalf of the lease */
		kref_get(&tcon->crfid.refcount);
		smb2_parse_contexts(server, o_rsp,
				    &oparms.fid->epoch,
				    oparms.fid->lease_key, &oplock, NULL);
	} else
		goto oshr_exit;

	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	/* cache the root's FILE_ALL_INFORMATION if the copy validates */
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = 1;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
785
Steve French34f62642013-10-09 02:07:00 -0500786static void
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500787smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
788{
789 int rc;
790 __le16 srch_path = 0; /* Null - open root of share */
791 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
792 struct cifs_open_parms oparms;
793 struct cifs_fid fid;
Steve French3d4ef9a2018-04-25 22:19:09 -0500794 bool no_cached_open = tcon->nohandlecache;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500795
796 oparms.tcon = tcon;
797 oparms.desired_access = FILE_READ_ATTRIBUTES;
798 oparms.disposition = FILE_OPEN;
799 oparms.create_options = 0;
800 oparms.fid = &fid;
801 oparms.reconnect = false;
802
Steve French3d4ef9a2018-04-25 22:19:09 -0500803 if (no_cached_open)
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000804 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
805 NULL);
Steve French3d4ef9a2018-04-25 22:19:09 -0500806 else
807 rc = open_shroot(xid, tcon, &fid);
808
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500809 if (rc)
810 return;
811
Steve Frenchc481e9f2013-10-14 01:21:53 -0500812 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500813
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500814 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
815 FS_ATTRIBUTE_INFORMATION);
816 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
817 FS_DEVICE_INFORMATION);
818 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500819 FS_VOLUME_INFORMATION);
820 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500821 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Steve French3d4ef9a2018-04-25 22:19:09 -0500822 if (no_cached_open)
823 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000824 else
825 close_shroot(&tcon->crfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500826}
827
828static void
Steve French34f62642013-10-09 02:07:00 -0500829smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
830{
831 int rc;
832 __le16 srch_path = 0; /* Null - open root of share */
833 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
834 struct cifs_open_parms oparms;
835 struct cifs_fid fid;
836
837 oparms.tcon = tcon;
838 oparms.desired_access = FILE_READ_ATTRIBUTES;
839 oparms.disposition = FILE_OPEN;
840 oparms.create_options = 0;
841 oparms.fid = &fid;
842 oparms.reconnect = false;
843
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000844 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500845 if (rc)
846 return;
847
Steven French21671142013-10-09 13:36:35 -0500848 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
849 FS_ATTRIBUTE_INFORMATION);
850 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
851 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500852 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -0500853}
854
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400855static int
856smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
857 struct cifs_sb_info *cifs_sb, const char *full_path)
858{
859 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400860 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700861 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400862 struct cifs_open_parms oparms;
863 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400864
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000865 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -0500866 return 0;
867
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400868 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
869 if (!utf16_path)
870 return -ENOMEM;
871
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400872 oparms.tcon = tcon;
873 oparms.desired_access = FILE_READ_ATTRIBUTES;
874 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -0500875 if (backup_cred(cifs_sb))
876 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
877 else
878 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400879 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400880 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400881
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000882 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400883 if (rc) {
884 kfree(utf16_path);
885 return rc;
886 }
887
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400888 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400889 kfree(utf16_path);
890 return rc;
891}
892
/*
 * Extract the server-assigned unique file id (inode number) from query
 * results already fetched into @data; no network round trip is made here.
 * Always succeeds.  xid/tcon/cifs_sb/full_path are unused but required by
 * the server_ops function-pointer signature this implements.
 */
static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}
901
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700902static int
903smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
904 struct cifs_fid *fid, FILE_ALL_INFO *data)
905{
906 int rc;
907 struct smb2_file_all_info *smb2_data;
908
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +0400909 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700910 GFP_KERNEL);
911 if (smb2_data == NULL)
912 return -ENOMEM;
913
914 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
915 smb2_data);
916 if (!rc)
917 move_smb2_info_to_cifs(data, smb2_data);
918 kfree(smb2_data);
919 return rc;
920}
921
Arnd Bergmann1368f152017-09-05 11:24:15 +0200922#ifdef CONFIG_CIFS_XATTR
/*
 * Walk the FILE_FULL_EA_INFORMATION list in @src (@src_size bytes) and
 * either:
 *  - @ea_name != NULL: find that single EA and copy its value into @dst,
 *    returning the value length (or -ERANGE if @dst_size is too small,
 *    -ENODATA if the name is not in the list).  A @dst_size of 0 returns
 *    just the required length.
 *  - @ea_name == NULL (listxattr): emit each name as a NUL-terminated
 *    "user.<name>" string into @dst, returning the total bytes produced.
 *    A @dst_size of 0 (tracked via buf_size) computes the size only.
 * Returns -EIO if an entry claims to extend past the end of the list.
 * Note: rc doubles as byte accumulator and error code; the order of the
 * break/goto paths below is significant (a found name must skip the
 * trailing -ENODATA overwrite by jumping straight to out).
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	/* remember original dst_size: distinguishes "size only" mode */
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		/* a zero-length name terminates the list */
		if (name_len == 0)
			break;

		/* 8 = fixed header before ea_data; +1 for the name's NUL */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* offset of 0 marks the last entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1005
1006static ssize_t
1007smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
1008 const unsigned char *path, const unsigned char *ea_name,
1009 char *ea_data, size_t buf_size,
1010 struct cifs_sb_info *cifs_sb)
1011{
1012 int rc;
1013 __le16 *utf16_path;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001014 struct kvec rsp_iov = {NULL, 0};
1015 int buftype = CIFS_NO_BUFFER;
1016 struct smb2_query_info_rsp *rsp;
1017 struct smb2_file_full_ea_info *info = NULL;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001018
1019 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1020 if (!utf16_path)
1021 return -ENOMEM;
1022
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001023 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1024 FILE_READ_EA,
1025 FILE_FULL_EA_INFORMATION,
1026 SMB2_O_INFO_FILE,
Ronnie Sahlbergc4627e62019-01-29 12:46:17 +10001027 CIFSMaxBufSize -
1028 MAX_SMB2_CREATE_RESPONSE_SIZE -
1029 MAX_SMB2_CLOSE_RESPONSE_SIZE,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001030 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001031 if (rc) {
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001032 /*
1033 * If ea_name is NULL (listxattr) and there are no EAs,
1034 * return 0 as it's not an error. Otherwise, the specified
1035 * ea_name was not found.
1036 */
1037 if (!ea_name && rc == -ENODATA)
1038 rc = 0;
1039 goto qeas_exit;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001040 }
1041
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001042 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
1043 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1044 le32_to_cpu(rsp->OutputBufferLength),
1045 &rsp_iov,
1046 sizeof(struct smb2_file_full_ea_info));
1047 if (rc)
1048 goto qeas_exit;
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001049
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001050 info = (struct smb2_file_full_ea_info *)(
1051 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1052 rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
1053 le32_to_cpu(rsp->OutputBufferLength), ea_name);
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001054
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001055 qeas_exit:
1056 kfree(utf16_path);
1057 free_rsp_buf(buftype, rsp_iov.iov_base);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001058 return rc;
1059}
1060
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001061
1062static int
1063smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1064 const char *path, const char *ea_name, const void *ea_value,
1065 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1066 struct cifs_sb_info *cifs_sb)
1067{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001068 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001069 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001070 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001071 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001072 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001073 struct smb_rqst rqst[3];
1074 int resp_buftype[3];
1075 struct kvec rsp_iov[3];
1076 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1077 struct cifs_open_parms oparms;
1078 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1079 struct cifs_fid fid;
1080 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1081 unsigned int size[1];
1082 void *data[1];
1083 struct smb2_file_full_ea_info *ea = NULL;
1084 struct kvec close_iov[1];
1085 int rc;
1086
1087 if (smb3_encryption_required(tcon))
1088 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001089
1090 if (ea_name_len > 255)
1091 return -EINVAL;
1092
1093 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1094 if (!utf16_path)
1095 return -ENOMEM;
1096
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001097 memset(rqst, 0, sizeof(rqst));
1098 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1099 memset(rsp_iov, 0, sizeof(rsp_iov));
1100
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001101 if (ses->server->ops->query_all_EAs) {
1102 if (!ea_value) {
1103 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1104 ea_name, NULL, 0,
1105 cifs_sb);
1106 if (rc == -ENODATA)
1107 goto sea_exit;
1108 }
1109 }
1110
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001111 /* Open */
1112 memset(&open_iov, 0, sizeof(open_iov));
1113 rqst[0].rq_iov = open_iov;
1114 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1115
1116 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001117 oparms.tcon = tcon;
1118 oparms.desired_access = FILE_WRITE_EA;
1119 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001120 if (backup_cred(cifs_sb))
1121 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1122 else
1123 oparms.create_options = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001124 oparms.fid = &fid;
1125 oparms.reconnect = false;
1126
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001127 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
1128 if (rc)
1129 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001130 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001131
1132
1133 /* Set Info */
1134 memset(&si_iov, 0, sizeof(si_iov));
1135 rqst[1].rq_iov = si_iov;
1136 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001137
1138 len = sizeof(ea) + ea_name_len + ea_value_len + 1;
1139 ea = kzalloc(len, GFP_KERNEL);
1140 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001141 rc = -ENOMEM;
1142 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001143 }
1144
1145 ea->ea_name_length = ea_name_len;
1146 ea->ea_value_length = cpu_to_le16(ea_value_len);
1147 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1148 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1149
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001150 size[0] = len;
1151 data[0] = ea;
1152
1153 rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
1154 COMPOUND_FID, current->tgid,
1155 FILE_FULL_EA_INFORMATION,
1156 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001157 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001158 smb2_set_related(&rqst[1]);
1159
1160
1161 /* Close */
1162 memset(&close_iov, 0, sizeof(close_iov));
1163 rqst[2].rq_iov = close_iov;
1164 rqst[2].rq_nvec = 1;
1165 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
1166 smb2_set_related(&rqst[2]);
1167
1168 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1169 resp_buftype, rsp_iov);
1170
1171 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001172 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001173 kfree(utf16_path);
1174 SMB2_open_free(&rqst[0]);
1175 SMB2_set_info_free(&rqst[1]);
1176 SMB2_close_free(&rqst[2]);
1177 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1178 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1179 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001180 return rc;
1181}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001182#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001183
/*
 * Report whether echo (keepalive) requests may currently be sent on this
 * server connection; simply reflects the server->echoes flag maintained
 * elsewhere (e.g. cleared while credits are exhausted).
 */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1189
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001190static void
1191smb2_clear_stats(struct cifs_tcon *tcon)
1192{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001193 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001194
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001195 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1196 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1197 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1198 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001199}
1200
/*
 * Append this tcon's share capabilities, sector-size flags, share flags,
 * tid and maximal access to the given seq_file (used for the cifs debug
 * proc output).  Note the sector-size (ss_flags) items are appended to
 * the same "Share Capabilities" line as the capability bits;
 * ss_flags presumably comes from the FS_SECTOR_SIZE_INFORMATION query --
 * populated elsewhere, not visible here.
 */
static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	/* only meaningful when the sector size query succeeded */
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}
1233
1234static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001235smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1236{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001237 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1238 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001239
1240 /*
1241 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1242 * totals (requests sent) since those SMBs are per-session not per tcon
1243 */
Steve French52ce1ac2018-07-31 01:46:47 -05001244 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1245 (long long)(tcon->bytes_read),
1246 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001247 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1248 atomic_read(&tcon->num_local_opens),
1249 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001250 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001251 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1252 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001253 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001254 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1255 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001256 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001257 atomic_read(&sent[SMB2_CREATE_HE]),
1258 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001259 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001260 atomic_read(&sent[SMB2_CLOSE_HE]),
1261 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001262 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001263 atomic_read(&sent[SMB2_FLUSH_HE]),
1264 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001265 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001266 atomic_read(&sent[SMB2_READ_HE]),
1267 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001268 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001269 atomic_read(&sent[SMB2_WRITE_HE]),
1270 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001271 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001272 atomic_read(&sent[SMB2_LOCK_HE]),
1273 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001274 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001275 atomic_read(&sent[SMB2_IOCTL_HE]),
1276 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001277 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001278 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1279 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001280 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001281 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1282 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001283 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001284 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1285 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001286 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001287 atomic_read(&sent[SMB2_SET_INFO_HE]),
1288 atomic_read(&failed[SMB2_SET_INFO_HE]));
1289 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1290 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1291 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001292}
1293
/*
 * Record a freshly opened SMB2 handle in the cifsFileInfo: copy the
 * persistent/volatile fid pair (and the mid in debug builds), apply the
 * granted oplock/lease level to the inode via the server's
 * set_oplock_level op, and refresh the byte-range-lock caching state
 * accordingly.  The create_guid copy supports durable handle reconnect.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	/* brlock caching is only safe while we hold a write-caching level */
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1310
/*
 * server_ops close callback: close the handle on the server, discarding
 * the SMB2_close return value (close errors are not propagated here).
 */
static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1317
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001318static int
Steve French41c13582013-11-14 00:05:36 -06001319SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1320 u64 persistent_fid, u64 volatile_fid,
1321 struct copychunk_ioctl *pcchunk)
1322{
1323 int rc;
1324 unsigned int ret_data_len;
1325 struct resume_key_req *res_key;
1326
1327 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1328 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001329 NULL, 0 /* no input */, CIFSMaxBufSize,
Steve French41c13582013-11-14 00:05:36 -06001330 (char **)&res_key, &ret_data_len);
1331
1332 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001333 cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
Steve French41c13582013-11-14 00:05:36 -06001334 goto req_res_key_exit;
1335 }
1336 if (ret_data_len < sizeof(struct resume_key_req)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001337 cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
Steve French41c13582013-11-14 00:05:36 -06001338 rc = -EINVAL;
1339 goto req_res_key_exit;
1340 }
1341 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1342
1343req_res_key_exit:
1344 kfree(res_key);
1345 return rc;
1346}
1347
/*
 * Handle the CIFS_QUERY_INFO passthrough ioctl: build a compounded
 * open / (query-info | ioctl | set-info) / close request from the
 * userspace-supplied struct smb_query_info at @p and copy the server's
 * response back to userspace.  @path is the already-converted UTF-16
 * target path; @is_dir selects directory vs file open semantics.
 * Returns 0 on success or a negative error code.
 */
static int
smb2_ioctl_query_info(const unsigned int xid,
		      struct cifs_tcon *tcon,
		      __le16 *path, int is_dir,
		      unsigned long p)
{
	struct cifs_ses *ses = tcon->ses;
	char __user *arg = (char __user *)p;
	struct smb_query_info qi;
	struct smb_query_info __user *pqi;
	int rc = 0;
	int flags = 0;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	struct smb2_ioctl_rsp *io_rsp = NULL;
	void *buffer = NULL;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec qi_iov[1];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	struct kvec close_iov[1];
	unsigned int size[2];
	void *data[2];

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
		return -EFAULT;

	/* cap the caller-supplied input/output payload size */
	if (qi.output_buffer_length > 1024)
		return -EINVAL;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	/* caller's payload follows the fixed smb_query_info header */
	buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
			   qi.output_buffer_length)) {
		rc = -EFAULT;
		goto iqinf_exit;
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.disposition = FILE_OPEN;
	if (is_dir)
		oparms.create_options = CREATE_NOT_FILE;
	else
		oparms.create_options = CREATE_NOT_DIR;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/*
	 * Choose the open access mask from the request kind; for FSCTL
	 * passthrough the access bits are encoded in the fsctl code itself.
	 * NOTE(review): PASSTHRU_SET_INFO is tested with '&' here but with
	 * '==' in the request-building chain below -- confirm intended.
	 */
	if (qi.flags & PASSTHRU_FSCTL) {
		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
			oparms.desired_access = GENERIC_ALL;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
			oparms.desired_access = GENERIC_READ;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
			oparms.desired_access = GENERIC_WRITE;
			break;
		}
	} else if (qi.flags & PASSTHRU_SET_INFO) {
		oparms.desired_access = GENERIC_WRITE;
	} else {
		oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
	}

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query */
	if (qi.flags & PASSTHRU_FSCTL) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN))
			rc = -EPERM;
		else {
			memset(&io_iov, 0, sizeof(io_iov));
			rqst[1].rq_iov = io_iov;
			rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

			rc = SMB2_ioctl_init(tcon, &rqst[1],
					     COMPOUND_FID, COMPOUND_FID,
					     qi.info_type, true, buffer,
					     qi.output_buffer_length,
					     CIFSMaxBufSize);
		}
	} else if (qi.flags == PASSTHRU_SET_INFO) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN))
			rc = -EPERM;
		else {
			memset(&si_iov, 0, sizeof(si_iov));
			rqst[1].rq_iov = si_iov;
			rqst[1].rq_nvec = 1;

			/* 8-byte payload: FILE_END_OF_FILE_INFORMATION */
			size[0] = 8;
			data[0] = buffer;

			rc = SMB2_set_info_init(tcon, &rqst[1],
					COMPOUND_FID, COMPOUND_FID,
					current->tgid,
					FILE_END_OF_FILE_INFORMATION,
					SMB2_O_INFO_FILE, 0, data, size);
		}
	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
		memset(&qi_iov, 0, sizeof(qi_iov));
		rqst[1].rq_iov = qi_iov;
		rqst[1].rq_nvec = 1;

		rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, qi.file_info_class,
				  qi.info_type, qi.additional_information,
				  qi.input_buffer_length,
				  qi.output_buffer_length, buffer);
	} else { /* unknown flags */
		cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
		rc = -EINVAL;
	}

	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto iqinf_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc)
		goto iqinf_exit;

	/*
	 * Copy the middle (query/ioctl) response back to userspace:
	 * first the (possibly clamped) length, then the payload bytes.
	 */
	if (qi.flags & PASSTHRU_FSCTL) {
		pqi = (struct smb_query_info __user *)arg;
		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
		/* reject server offsets pointing past the response buffer */
		if (qi.input_buffer_length > 0 &&
		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
		if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
		if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
				 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
				 qi.input_buffer_length)) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
	} else {
		pqi = (struct smb_query_info __user *)arg;
		qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
		if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
		if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
			rc = -EFAULT;
			goto iqinf_exit;
		}
	}

 iqinf_exit:
	kfree(buffer);
	SMB2_open_free(&rqst[0]);
	/* rqst[1] was built as either an ioctl or a query-info request */
	if (qi.flags & PASSTHRU_FSCTL)
		SMB2_ioctl_free(&rqst[1]);
	else
		SMB2_query_info_free(&rqst[1]);

	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
1563
Sachin Prabhu620d8742017-02-10 16:03:51 +05301564static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001565smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001566 struct cifsFileInfo *srcfile,
1567 struct cifsFileInfo *trgtfile, u64 src_off,
1568 u64 len, u64 dest_off)
1569{
1570 int rc;
1571 unsigned int ret_data_len;
1572 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001573 struct copychunk_ioctl_rsp *retbuf = NULL;
1574 struct cifs_tcon *tcon;
1575 int chunks_copied = 0;
1576 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301577 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001578
1579 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1580
1581 if (pcchunk == NULL)
1582 return -ENOMEM;
1583
Christoph Probsta205d502019-05-08 21:36:25 +02001584 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001585 /* Request a key from the server to identify the source of the copy */
1586 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1587 srcfile->fid.persistent_fid,
1588 srcfile->fid.volatile_fid, pcchunk);
1589
1590 /* Note: request_res_key sets res_key null only if rc !=0 */
1591 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001592 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001593
1594 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001595 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001596 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001597 pcchunk->Reserved2 = 0;
1598
Steve French9bf0c9c2013-11-16 18:05:28 -06001599 tcon = tlink_tcon(trgtfile->tlink);
1600
1601 while (len > 0) {
1602 pcchunk->SourceOffset = cpu_to_le64(src_off);
1603 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1604 pcchunk->Length =
1605 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1606
1607 /* Request server copy to target from src identified by key */
1608 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001609 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001610 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001611 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1612 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001613 if (rc == 0) {
1614 if (ret_data_len !=
1615 sizeof(struct copychunk_ioctl_rsp)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001616 cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001617 rc = -EIO;
1618 goto cchunk_out;
1619 }
1620 if (retbuf->TotalBytesWritten == 0) {
1621 cifs_dbg(FYI, "no bytes copied\n");
1622 rc = -EIO;
1623 goto cchunk_out;
1624 }
1625 /*
1626 * Check if server claimed to write more than we asked
1627 */
1628 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1629 le32_to_cpu(pcchunk->Length)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001630 cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001631 rc = -EIO;
1632 goto cchunk_out;
1633 }
1634 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001635 cifs_tcon_dbg(VFS, "invalid num chunks written\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001636 rc = -EIO;
1637 goto cchunk_out;
1638 }
1639 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001640
Sachin Prabhu620d8742017-02-10 16:03:51 +05301641 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1642 src_off += bytes_written;
1643 dest_off += bytes_written;
1644 len -= bytes_written;
1645 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001646
Sachin Prabhu620d8742017-02-10 16:03:51 +05301647 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001648 le32_to_cpu(retbuf->ChunksWritten),
1649 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301650 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001651 } else if (rc == -EINVAL) {
1652 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1653 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001654
Steve French9bf0c9c2013-11-16 18:05:28 -06001655 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1656 le32_to_cpu(retbuf->ChunksWritten),
1657 le32_to_cpu(retbuf->ChunkBytesWritten),
1658 le32_to_cpu(retbuf->TotalBytesWritten));
1659
1660 /*
1661 * Check if this is the first request using these sizes,
1662 * (ie check if copy succeed once with original sizes
1663 * and check if the server gave us different sizes after
1664 * we already updated max sizes on previous request).
1665 * if not then why is the server returning an error now
1666 */
1667 if ((chunks_copied != 0) || chunk_sizes_updated)
1668 goto cchunk_out;
1669
1670 /* Check that server is not asking us to grow size */
1671 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1672 tcon->max_bytes_chunk)
1673 tcon->max_bytes_chunk =
1674 le32_to_cpu(retbuf->ChunkBytesWritten);
1675 else
1676 goto cchunk_out; /* server gave us bogus size */
1677
1678 /* No need to change MaxChunks since already set to 1 */
1679 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001680 } else
1681 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001682 }
1683
1684cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001685 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001686 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301687 if (rc)
1688 return rc;
1689 else
1690 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001691}
1692
1693static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001694smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1695 struct cifs_fid *fid)
1696{
1697 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1698}
1699
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001700static unsigned int
1701smb2_read_data_offset(char *buf)
1702{
1703 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001704
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001705 return rsp->DataOffset;
1706}
1707
1708static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001709smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001710{
1711 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001712
1713 if (in_remaining)
1714 return le32_to_cpu(rsp->DataRemaining);
1715
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001716 return le32_to_cpu(rsp->DataLength);
1717}
1718
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001719
1720static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001721smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001722 struct cifs_io_parms *parms, unsigned int *bytes_read,
1723 char **buf, int *buf_type)
1724{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001725 parms->persistent_fid = pfid->persistent_fid;
1726 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001727 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1728}
1729
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001730static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001731smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001732 struct cifs_io_parms *parms, unsigned int *written,
1733 struct kvec *iov, unsigned long nr_segs)
1734{
1735
Steve Frenchdb8b6312014-09-22 05:13:55 -05001736 parms->persistent_fid = pfid->persistent_fid;
1737 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001738 return SMB2_write(xid, parms, written, iov, nr_segs);
1739}
1740
/*
 * Set or clear the SPARSE_FILE attribute based on value passed in setsparse.
 * Returns true when the file already had (or now has) the requested sparse
 * state; returns false when the FSCTL fails or the share is known not to
 * support it.  On success the cached cifsAttrs are updated to match.
 */
static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
			   struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
{
	struct cifsInodeInfo *cifsi;
	int rc;

	cifsi = CIFS_I(inode);

	/* if file already sparse don't bother setting sparse again */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
		return true; /* already sparse */

	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
		return true; /* already not sparse */

	/*
	 * Can't check for sparse support on share the usual way via the
	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
	 * since Samba server doesn't set the flag on the share, yet
	 * supports the set sparse FSCTL and returns sparse correctly
	 * in the file attributes. If we fail setting sparse though we
	 * mark that server does not support sparse files for this share
	 * to avoid repeatedly sending the unsupported fsctl to server
	 * if the file is repeatedly extended.
	 */
	if (tcon->broken_sparse_sup)
		return false;

	/* one-byte payload: 1 = set sparse, 0 = clear sparse */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
			true /* is_fctl */,
			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
	if (rc) {
		tcon->broken_sparse_sup = true;
		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
		return false;
	}

	/* keep the locally cached attribute bits in sync with the server */
	if (setsparse)
		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
	else
		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);

	return true;
}
1787
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001788static int
1789smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1790 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1791{
1792 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001793 struct inode *inode;
1794
1795 /*
1796 * If extending file more than one page make sparse. Many Linux fs
1797 * make files sparse by default when extending via ftruncate
1798 */
David Howells2b0143b2015-03-17 22:25:59 +00001799 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001800
1801 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001802 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001803
Steve Frenchd43cc792014-08-13 17:16:29 -05001804 /* whether set sparse succeeds or not, extend the file */
1805 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001806 }
1807
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001808 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001809 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001810}
1811
/*
 * Clone (reflink) a byte range from srcfile into trgtfile using
 * FSCTL_DUPLICATE_EXTENTS_TO_FILE.  Only attempted when the share
 * advertises block refcounting support (e.g. ReFS); the target is
 * extended to cover the destination range before the clone.
 */
static int
smb2_duplicate_extents(const unsigned int xid,
		struct cifsFileInfo *srcfile,
		struct cifsFileInfo *trgtfile, u64 src_off,
		u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* servers may advertise duplicate extent support with this flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);

	/* grow the target first so the cloned range fits inside it */
	rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
	if (rc)
		goto duplicate_extents_out;

	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			true /* is_fsctl */,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	/* this FSCTL is not expected to return any payload */
	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	return rc;
}
Steve French02b16662015-06-27 21:18:36 -07001855
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001856static int
Steve French64a5cfa2013-10-14 15:31:32 -05001857smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1858 struct cifsFileInfo *cfile)
1859{
1860 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1861 cfile->fid.volatile_fid);
1862}
1863
1864static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001865smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1866 struct cifsFileInfo *cfile)
1867{
1868 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001869 unsigned int ret_data_len;
1870
1871 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1872 integr_info.Flags = 0;
1873 integr_info.Reserved = 0;
1874
1875 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1876 cfile->fid.volatile_fid,
1877 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001878 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001879 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001880 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001881 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001882 &ret_data_len);
1883
1884}
1885
Steve Frenche02789a2018-08-09 14:33:12 -05001886/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1887#define GMT_TOKEN_SIZE 50
1888
Steve French153322f2019-03-28 22:32:49 -05001889#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1890
Steve Frenche02789a2018-08-09 14:33:12 -05001891/*
1892 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1893 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1894 */
/*
 * Enumerate previous versions (snapshots) of the file for the
 * CIFS_ENUMERATE_SNAPSHOTS ioctl.  Userspace calls this twice: first
 * with a zero-sized array to learn how much space is needed, then with
 * a buffer large enough for all GMT tokens.  @ioc_buf points to a
 * userspace struct smb_snapshot_array.
 */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, void __user *ioc_buf)
{
	char *retbuf = NULL;
	unsigned int ret_data_len = 0;
	int rc;
	u32 max_response_size;
	struct smb_snapshot_array snapshot_in;

	/*
	 * On the first query to enumerate the list of snapshots available
	 * for this volume the buffer begins with 0 (number of snapshots
	 * which can be returned is zero since at that point we do not know
	 * how big the buffer needs to be). On the second query,
	 * it (ret_data_len) is set to number of snapshots so we can
	 * know to set the maximum response size larger (see below).
	 */
	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
		return -EFAULT;

	/*
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes.
	 */
	if (ret_data_len == 0)
		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
	else
		max_response_size = CIFSMaxBufSize;

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			true /* is_fsctl */,
			NULL, 0 /* no input data */, max_response_size,
			(char **)&retbuf,
			&ret_data_len);
	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
			rc, ret_data_len);
	if (rc)
		return rc;

	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
		/* Fixup buffer */
		if (copy_from_user(&snapshot_in, ioc_buf,
		    sizeof(struct smb_snapshot_array))) {
			rc = -EFAULT;
			kfree(retbuf);
			return rc;
		}

		/*
		 * Check for min size, ie not large enough to fit even one GMT
		 * token (snapshot). On the first ioctl some users may pass in
		 * smaller size (or zero) to simply get the size of the array
		 * so the user space caller can allocate sufficient memory
		 * and retry the ioctl again with larger array size sufficient
		 * to hold all of the snapshot GMT tokens on the second try.
		 */
		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
			ret_data_len = sizeof(struct smb_snapshot_array);

		/*
		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
		 * the snapshot array (of 50 byte GMT tokens) each
		 * representing an available previous version of the data
		 */
		if (ret_data_len > (snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array)))
			ret_data_len = snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array);

		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
			rc = -EFAULT;
	}

	kfree(retbuf);
	return rc;
}
1977
/*
 * Begin a directory enumeration: open the directory at @path (handle
 * returned through @fid) and issue the first SMB2 QUERY_DIRECTORY into
 * @srch_inf.  On query failure the just-opened handle is closed again.
 * Note: @search_flags is currently unused by the SMB2 implementation.
 */
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *path, struct cifs_sb_info *cifs_sb,
		     struct cifs_fid *fid, __u16 search_flags,
		     struct cifs_search_info *srch_inf)
{
	__le16 *utf16_path;
	int rc;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	kfree(utf16_path);
	if (rc) {
		cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
		return rc;
	}

	srch_inf->entries_in_buffer = 0;
	srch_inf->index_of_last_entry = 2;

	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
				  fid->volatile_fid, 0, srch_inf);
	if (rc) {
		cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
		/* do not leak the handle opened above */
		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
	}
	return rc;
}
2021
2022static int
2023smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2024 struct cifs_fid *fid, __u16 search_flags,
2025 struct cifs_search_info *srch_inf)
2026{
2027 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2028 fid->volatile_fid, 0, srch_inf);
2029}
2030
2031static int
2032smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2033 struct cifs_fid *fid)
2034{
2035 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2036}
2037
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002038/*
Christoph Probsta205d502019-05-08 21:36:25 +02002039 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2040 * the number of credits and return true. Otherwise - return false.
2041 */
static bool
smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	/* only interim responses carry STATUS_PENDING */
	if (shdr->Status != STATUS_PENDING)
		return false;

	/*
	 * Credits granted with the interim response are added to the
	 * server totals under req_lock; waiters blocked on credits are
	 * then woken so they can retry.
	 */
	if (shdr->CreditRequest) {
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
	}

	return true;
}
2059
/*
 * Return true when the response status indicates the session is gone
 * (expired or deleted on the server), so the caller can trigger
 * reconnect/re-authentication.  Emits a tracepoint for diagnosis.
 */
static bool
smb2_is_session_expired(char *buf)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
	    shdr->Status != STATUS_USER_SESSION_DELETED)
		return false;

	trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
			       le16_to_cpu(shdr->Command),
			       le64_to_cpu(shdr->MessageId));
	cifs_dbg(FYI, "Session expired or deleted\n");

	return true;
}
2076
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002077static int
2078smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2079 struct cifsInodeInfo *cinode)
2080{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002081 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2082 return SMB2_lease_break(0, tcon, cinode->lease_key,
2083 smb2_get_lease_state(cinode));
2084
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002085 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2086 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002087 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002088}
2089
/*
 * Mark a request in a compound chain as related to the previous one,
 * so the server reuses the prior request's FileId/session/tree context.
 */
void
smb2_set_related(struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
		return;
	}
	shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
}
2102
/* zero bytes used to pad compound request iovs to 8-byte alignment */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2104
/*
 * Set the NextCommand field of a request that will be followed by
 * another request in a compound, padding the request out to the
 * required 8-byte alignment first.  In the unencrypted case padding is
 * appended as an extra iov; with encryption the iovs are flattened
 * into the first buffer and padded in place, since the transform code
 * cannot handle a separate padding iov.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			/* assumes iov[0] has room for the trailing iovs —
			 * NOTE(review): relies on caller-sized buffers */
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2161
/*
 * Open the file at @utf16_path, query the requested info class, and
 * close the handle, all in a single compound open/query/close request.
 * On success the query info response is passed back to the caller in
 * @rsp/@buftype; the caller must free it with free_rsp_buf().
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* rqst[0]: open the path */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	if (cifs_sb && backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* rqst[1]: query info on the handle opened by rqst[0] */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	/* COMPOUND_FID: use the FileId produced by the preceding open */
	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* rqst[2]: close the handle again */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		/* on error the query response is not handed to the caller */
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		goto qic_exit;
	}
	/* hand ownership of the query info response to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2251
2252static int
2253smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2254 struct kstatfs *buf)
2255{
2256 struct smb2_query_info_rsp *rsp;
2257 struct smb2_fs_full_size_info *info = NULL;
2258 __le16 utf16_path = 0; /* Null - open root of share */
2259 struct kvec rsp_iov = {NULL, 0};
2260 int buftype = CIFS_NO_BUFFER;
2261 int rc;
2262
2263
2264 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2265 FILE_READ_ATTRIBUTES,
2266 FS_FULL_SIZE_INFORMATION,
2267 SMB2_O_INFO_FILESYSTEM,
2268 sizeof(struct smb2_fs_full_size_info),
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002269 &rsp_iov, &buftype, NULL);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002270 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002271 goto qfs_exit;
2272
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002273 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002274 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002275 info = (struct smb2_fs_full_size_info *)(
2276 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2277 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2278 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002279 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002280 sizeof(struct smb2_fs_full_size_info));
2281 if (!rc)
2282 smb2_copy_fs_info_to_kstatfs(info, buf);
2283
2284qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002285 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002286 return rc;
2287}
2288
Steve French2d304212018-06-24 23:28:12 -05002289static int
2290smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2291 struct kstatfs *buf)
2292{
2293 int rc;
2294 __le16 srch_path = 0; /* Null - open root of share */
2295 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2296 struct cifs_open_parms oparms;
2297 struct cifs_fid fid;
2298
2299 if (!tcon->posix_extensions)
2300 return smb2_queryfs(xid, tcon, buf);
2301
2302 oparms.tcon = tcon;
2303 oparms.desired_access = FILE_READ_ATTRIBUTES;
2304 oparms.disposition = FILE_OPEN;
2305 oparms.create_options = 0;
2306 oparms.fid = &fid;
2307 oparms.reconnect = false;
2308
2309 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2310 if (rc)
2311 return rc;
2312
2313 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2314 fid.volatile_fid, buf);
2315 buf->f_type = SMB2_MAGIC_NUMBER;
2316 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2317 return rc;
2318}
Steve French2d304212018-06-24 23:28:12 -05002319
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002320static bool
2321smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2322{
2323 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2324 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2325}
2326
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002327static int
2328smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2329 __u64 length, __u32 type, int lock, int unlock, bool wait)
2330{
2331 if (unlock && !lock)
2332 type = SMB2_LOCKFLAG_UNLOCK;
2333 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2334 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2335 current->tgid, length, offset, type, wait);
2336}
2337
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002338static void
2339smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2340{
2341 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2342}
2343
2344static void
2345smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2346{
2347 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2348}
2349
/*
 * Generate a fresh random lease key for @fid.  generate_random_uuid()
 * fills the 16-byte key; neighbouring helpers copy it with
 * SMB2_LEASE_KEY_SIZE.
 */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2355
/*
 * Resolve a DFS referral for @search_name on session @ses.
 *
 * Sends FSCTL_DFS_GET_REFERRALS over the session's IPC tcon if one
 * exists, otherwise over the first tcon on the session (taking a
 * temporary reference on it).  On success the referral response is
 * parsed into @target_nodes / @num_of_nodes by parse_dfs_referrals()
 * (which presumably allocates *target_nodes for the caller to free —
 * TODO confirm against its definition).
 *
 * Returns 0 on success, -ENOTCONN if no tcon is available, -ENOMEM on
 * allocation failure, or the error from the ioctl/parse step.
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		/* no IPC tcon: borrow the first tcon on the session */
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		if (tcon)
			tcon->tc_count++;	/* pin it while we use it */
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	/* request is a fixed header followed by the UTF-16 path */
	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	/* retry the ioctl for as long as it asks us to (reconnects) */
	do {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
	} while (rc == -EAGAIN);

	if (rc) {
		/* ENOENT/EOPNOTSUPP are expected for non-DFS paths; stay quiet */
		if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
			cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002450
2451static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002452parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2453 u32 plen, char **target_path,
2454 struct cifs_sb_info *cifs_sb)
2455{
2456 unsigned int len;
2457
2458 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2459 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2460
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002461 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2462 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2463 le64_to_cpu(symlink_buf->InodeType));
2464 return -EOPNOTSUPP;
2465 }
2466
2467 *target_path = cifs_strndup_from_utf16(
2468 symlink_buf->PathBuffer,
2469 len, true, cifs_sb->local_nls);
2470 if (!(*target_path))
2471 return -ENOMEM;
2472
2473 convert_delimiter(*target_path, '/');
2474 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2475
2476 return 0;
2477}
2478
2479static int
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002480parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
2481 u32 plen, char **target_path,
2482 struct cifs_sb_info *cifs_sb)
2483{
2484 unsigned int sub_len;
2485 unsigned int sub_offset;
2486
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002487 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002488
2489 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
2490 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
2491 if (sub_offset + 20 > plen ||
2492 sub_offset + sub_len + 20 > plen) {
2493 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
2494 return -EIO;
2495 }
2496
2497 *target_path = cifs_strndup_from_utf16(
2498 symlink_buf->PathBuffer + sub_offset,
2499 sub_len, true, cifs_sb->local_nls);
2500 if (!(*target_path))
2501 return -ENOMEM;
2502
2503 convert_delimiter(*target_path, '/');
2504 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2505
2506 return 0;
2507}
2508
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002509static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002510parse_reparse_point(struct reparse_data_buffer *buf,
2511 u32 plen, char **target_path,
2512 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002513{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002514 if (plen < sizeof(struct reparse_data_buffer)) {
2515 cifs_dbg(VFS, "reparse buffer is too small. Must be "
2516 "at least 8 bytes but was %d\n", plen);
2517 return -EIO;
2518 }
2519
2520 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2521 sizeof(struct reparse_data_buffer)) {
2522 cifs_dbg(VFS, "srv returned invalid reparse buf "
2523 "length: %d\n", plen);
2524 return -EIO;
2525 }
2526
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002527 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002528 switch (le32_to_cpu(buf->ReparseTag)) {
2529 case IO_REPARSE_TAG_NFS:
2530 return parse_reparse_posix(
2531 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002532 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002533 case IO_REPARSE_TAG_SYMLINK:
2534 return parse_reparse_symlink(
2535 (struct reparse_symlink_data_buffer *)buf,
2536 plen, target_path, cifs_sb);
2537 default:
2538 cifs_dbg(VFS, "srv returned unknown symlink buffer "
2539 "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
2540 return -EOPNOTSUPP;
2541 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002542}
2543
Pavel Shilovsky78932422016-07-24 10:37:38 +03002544#define SMB2_SYMLINK_STRUCT_SIZE \
2545 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2546
/*
 * Resolve the target of a symlink (or other reparse point) at
 * @full_path.
 *
 * Sends a compound request: open / FSCTL_GET_REPARSE_POINT ioctl /
 * close.  Two outcomes are handled:
 *  - @is_reparse_point and the open succeeded: decode the reparse data
 *    returned by the ioctl via parse_reparse_point().
 *  - the open failed with a symlink error response (STATUS in the
 *    create response set): parse the smb2_symlink_err_rsp embedded in
 *    the error data and extract the substitute name.
 *
 * On success *target_path is a newly allocated string (caller frees);
 * it is set to NULL up front.  Returns 0 or a negative errno.
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;

	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	/* open the reparse point itself, not what it points at */
	if (is_reparse_point)
		oparms.create_options = OPEN_REPARSE_POINT;

	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* remember the create error response, if the server sent one */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->sync_hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* output buffer must lie inside the received response */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				 plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	/* no symlink error response to parse -> not a symlink */
	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	err_buf = err_iov.iov_base;
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	/* bounds-check both names against the received response length */
	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	*target_path = cifs_strndup_from_utf16(
				(char *)symlink->PathBuffer + sub_offset,
				sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

 querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2733
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002734static struct cifs_ntsd *
2735get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2736 const struct cifs_fid *cifsfid, u32 *pacllen)
2737{
2738 struct cifs_ntsd *pntsd = NULL;
2739 unsigned int xid;
2740 int rc = -EOPNOTSUPP;
2741 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2742
2743 if (IS_ERR(tlink))
2744 return ERR_CAST(tlink);
2745
2746 xid = get_xid();
2747 cifs_dbg(FYI, "trying to get acl\n");
2748
2749 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2750 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2751 free_xid(xid);
2752
2753 cifs_put_tlink(tlink);
2754
2755 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2756 if (rc)
2757 return ERR_PTR(rc);
2758 return pntsd;
2759
2760}
2761
2762static struct cifs_ntsd *
2763get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2764 const char *path, u32 *pacllen)
2765{
2766 struct cifs_ntsd *pntsd = NULL;
2767 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2768 unsigned int xid;
2769 int rc;
2770 struct cifs_tcon *tcon;
2771 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2772 struct cifs_fid fid;
2773 struct cifs_open_parms oparms;
2774 __le16 *utf16_path;
2775
2776 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2777 if (IS_ERR(tlink))
2778 return ERR_CAST(tlink);
2779
2780 tcon = tlink_tcon(tlink);
2781 xid = get_xid();
2782
2783 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002784 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002785 else
2786 oparms.create_options = 0;
2787
2788 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002789 if (!utf16_path) {
2790 rc = -ENOMEM;
2791 free_xid(xid);
2792 return ERR_PTR(rc);
2793 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002794
2795 oparms.tcon = tcon;
2796 oparms.desired_access = READ_CONTROL;
2797 oparms.disposition = FILE_OPEN;
2798 oparms.fid = &fid;
2799 oparms.reconnect = false;
2800
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002801 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002802 kfree(utf16_path);
2803 if (!rc) {
2804 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2805 fid.volatile_fid, (void **)&pntsd, pacllen);
2806 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2807 }
2808
2809 cifs_put_tlink(tlink);
2810 free_xid(xid);
2811
2812 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2813 if (rc)
2814 return ERR_PTR(rc);
2815 return pntsd;
2816}
2817
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002818static int
2819set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2820 struct inode *inode, const char *path, int aclflag)
2821{
2822 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2823 unsigned int xid;
2824 int rc, access_flags = 0;
2825 struct cifs_tcon *tcon;
2826 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2827 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2828 struct cifs_fid fid;
2829 struct cifs_open_parms oparms;
2830 __le16 *utf16_path;
2831
2832 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2833 if (IS_ERR(tlink))
2834 return PTR_ERR(tlink);
2835
2836 tcon = tlink_tcon(tlink);
2837 xid = get_xid();
2838
2839 if (backup_cred(cifs_sb))
2840 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2841 else
2842 oparms.create_options = 0;
2843
2844 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2845 access_flags = WRITE_OWNER;
2846 else
2847 access_flags = WRITE_DAC;
2848
2849 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002850 if (!utf16_path) {
2851 rc = -ENOMEM;
2852 free_xid(xid);
2853 return rc;
2854 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002855
2856 oparms.tcon = tcon;
2857 oparms.desired_access = access_flags;
2858 oparms.disposition = FILE_OPEN;
2859 oparms.path = path;
2860 oparms.fid = &fid;
2861 oparms.reconnect = false;
2862
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002863 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002864 kfree(utf16_path);
2865 if (!rc) {
2866 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2867 fid.volatile_fid, pnntsd, acllen, aclflag);
2868 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2869 }
2870
2871 cifs_put_tlink(tlink);
2872 free_xid(xid);
2873 return rc;
2874}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002875
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002876/* Retrieve an ACL from the server */
2877static struct cifs_ntsd *
2878get_smb2_acl(struct cifs_sb_info *cifs_sb,
2879 struct inode *inode, const char *path,
2880 u32 *pacllen)
2881{
2882 struct cifs_ntsd *pntsd = NULL;
2883 struct cifsFileInfo *open_file = NULL;
2884
2885 if (inode)
2886 open_file = find_readable_file(CIFS_I(inode), true);
2887 if (!open_file)
2888 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2889
2890 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2891 cifsFileInfo_put(open_file);
2892 return pntsd;
2893}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002894
/*
 * Zero @len bytes of the file starting at @offset via
 * FSCTL_SET_ZERO_DATA.  When @keep_size is false and the zeroed range
 * extends past the current size, the file is also grown with
 * SMB2_set_eof().  Refuses (-EOPNOTSUPP) to change the size when we do
 * not hold a read-cache oplock, since we then cannot tell whether we
 * are being asked to extend the file.  Entry/exit is traced.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);


	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* zero [FileOffset, BeyondFinalZero) on the server */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
2958
Steve French31742c52014-08-17 08:38:47 -05002959static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2960 loff_t offset, loff_t len)
2961{
2962 struct inode *inode;
Steve French31742c52014-08-17 08:38:47 -05002963 struct cifsFileInfo *cfile = file->private_data;
2964 struct file_zero_data_information fsctl_buf;
2965 long rc;
2966 unsigned int xid;
2967 __u8 set_sparse = 1;
2968
2969 xid = get_xid();
2970
David Howells2b0143b2015-03-17 22:25:59 +00002971 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002972
2973 /* Need to make file sparse, if not already, before freeing range. */
2974 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002975 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2976 rc = -EOPNOTSUPP;
2977 free_xid(xid);
2978 return rc;
2979 }
Steve French31742c52014-08-17 08:38:47 -05002980
Christoph Probsta205d502019-05-08 21:36:25 +02002981 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05002982
2983 fsctl_buf.FileOffset = cpu_to_le64(offset);
2984 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2985
2986 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2987 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002988 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05002989 sizeof(struct file_zero_data_information),
2990 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05002991 free_xid(xid);
2992 return rc;
2993}
2994
/*
 * Emulate fallocate() (mode 0 or FALLOC_FL_KEEP_SIZE) for SMB3 mounts.
 * Servers make files non-sparse by default, so "allocation" is mostly a
 * matter of clearing the sparse attribute and, when extending, pushing a
 * new end-of-file to the server.  Returns 0 on success or a negative errno
 * (-EOPNOTSUPP for the cases this emulation cannot express).
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			    loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and not extending
	 * then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		if (keep_size == true)
			rc = 0;
		/* check if extending file */
		else if (i_size_read(inode) >= off + len)
			/* not extending file and already not sparse */
			rc = 0;
		/* BB: in future add else clause to extend file */
		else
			rc = -EOPNOTSUPP;
		if (rc)
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
		else
			trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len);
		free_xid(xid);
		return rc;
	}

	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

		/* clear the sparse attribute so the range becomes allocated */
		smb2_set_sparse(xid, tcon, cfile, inode, false);
		rc = 0;
	} else {
		smb2_set_sparse(xid, tcon, cfile, inode, false);
		rc = 0;
		/* extending: push the new end-of-file to the server */
		if (i_size_read(inode) < off + len) {
			eof = cpu_to_le64(off + len);
			rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
					  cfile->fid.volatile_fid, cfile->pid,
					  &eof);
		}
	}

	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3086
/*
 * Implement SEEK_HOLE/SEEK_DATA llseek() for SMB3 by querying the server's
 * allocated ranges (FSCTL_QUERY_ALLOCATED_RANGES).  Other whence values are
 * delegated to generic_file_llseek().  Returns the new file position or a
 * negative errno (-ENXIO when there is no data/hole past @offset).
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, false);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* non-sparse file: all of [0, i_size) is data, hole starts at EOF */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	/* ask only for the first allocated range at or after @offset */
	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG)
		rc = 0;		/* more ranges exist; the first one is enough */
	if (rc)
		goto lseek_exit;

	/* no allocated range at all past @offset: it is one big hole */
	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		/* first allocated range starts at or after @offset */
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: if @offset precedes the first data range it is in a hole */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	/* otherwise the hole begins right after that data range */
	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
3170
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003171static int smb3_fiemap(struct cifs_tcon *tcon,
3172 struct cifsFileInfo *cfile,
3173 struct fiemap_extent_info *fei, u64 start, u64 len)
3174{
3175 unsigned int xid;
3176 struct file_allocated_range_buffer in_data, *out_data;
3177 u32 out_data_len;
3178 int i, num, rc, flags, last_blob;
3179 u64 next;
3180
3181 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
3182 return -EBADR;
3183
3184 xid = get_xid();
3185 again:
3186 in_data.file_offset = cpu_to_le64(start);
3187 in_data.length = cpu_to_le64(len);
3188
3189 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3190 cfile->fid.volatile_fid,
3191 FSCTL_QUERY_ALLOCATED_RANGES, true,
3192 (char *)&in_data, sizeof(in_data),
3193 1024 * sizeof(struct file_allocated_range_buffer),
3194 (char **)&out_data, &out_data_len);
3195 if (rc == -E2BIG) {
3196 last_blob = 0;
3197 rc = 0;
3198 } else
3199 last_blob = 1;
3200 if (rc)
3201 goto out;
3202
3203 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3204 rc = -EINVAL;
3205 goto out;
3206 }
3207 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3208 rc = -EINVAL;
3209 goto out;
3210 }
3211
3212 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3213 for (i = 0; i < num; i++) {
3214 flags = 0;
3215 if (i == num - 1 && last_blob)
3216 flags |= FIEMAP_EXTENT_LAST;
3217
3218 rc = fiemap_fill_next_extent(fei,
3219 le64_to_cpu(out_data[i].file_offset),
3220 le64_to_cpu(out_data[i].file_offset),
3221 le64_to_cpu(out_data[i].length),
3222 flags);
3223 if (rc < 0)
3224 goto out;
3225 if (rc == 1) {
3226 rc = 0;
3227 goto out;
3228 }
3229 }
3230
3231 if (!last_blob) {
3232 next = le64_to_cpu(out_data[num - 1].file_offset) +
3233 le64_to_cpu(out_data[num - 1].length);
3234 len = len - (next - start);
3235 start = next;
3236 goto again;
3237 }
3238
3239 out:
3240 free_xid(xid);
3241 kfree(out_data);
3242 return rc;
3243}
Steve French9ccf3212014-10-18 17:01:15 -05003244
Steve French31742c52014-08-17 08:38:47 -05003245static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3246 loff_t off, loff_t len)
3247{
3248 /* KEEP_SIZE already checked for by do_fallocate */
3249 if (mode & FALLOC_FL_PUNCH_HOLE)
3250 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003251 else if (mode & FALLOC_FL_ZERO_RANGE) {
3252 if (mode & FALLOC_FL_KEEP_SIZE)
3253 return smb3_zero_range(file, tcon, off, len, true);
3254 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003255 } else if (mode == FALLOC_FL_KEEP_SIZE)
3256 return smb3_simple_falloc(file, tcon, off, len, true);
3257 else if (mode == 0)
3258 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003259
3260 return -EOPNOTSUPP;
3261}
3262
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003263static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003264smb2_downgrade_oplock(struct TCP_Server_Info *server,
3265 struct cifsInodeInfo *cinode, bool set_level2)
3266{
3267 if (set_level2)
3268 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
3269 0, NULL);
3270 else
3271 server->ops->set_oplock_level(cinode, 0, 0, NULL);
3272}
3273
3274static void
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003275smb21_downgrade_oplock(struct TCP_Server_Info *server,
3276 struct cifsInodeInfo *cinode, bool set_level2)
3277{
3278 server->ops->set_oplock_level(cinode,
3279 set_level2 ? SMB2_LEASE_READ_CACHING_HE :
3280 0, 0, NULL);
3281}
3282
3283static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003284smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3285 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003286{
3287 oplock &= 0xFF;
3288 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3289 return;
3290 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003291 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003292 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3293 &cinode->vfs_inode);
3294 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003295 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003296 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3297 &cinode->vfs_inode);
3298 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3299 cinode->oplock = CIFS_CACHE_READ_FLG;
3300 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3301 &cinode->vfs_inode);
3302 } else
3303 cinode->oplock = 0;
3304}
3305
3306static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003307smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3308 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003309{
3310 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003311 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003312
3313 oplock &= 0xFF;
3314 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3315 return;
3316
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003317 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003318 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003319 strcat(message, "R");
3320 }
3321 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003322 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003323 strcat(message, "H");
3324 }
3325 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003326 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003327 strcat(message, "W");
3328 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003329 if (!new_oplock)
3330 strncpy(message, "None", sizeof(message));
3331
3332 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003333 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3334 &cinode->vfs_inode);
3335}
3336
/*
 * SMB3 variant of lease handling: apply the new lease state via
 * smb21_set_oplock_level(), then decide whether the page cache must be
 * purged based on how many lease epochs elapsed since the state we had.
 * A skipped epoch means we missed at least one intermediate lease change,
 * so cached data may be stale.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	if (purge_cache) {
		*purge_cache = false;
		/*
		 * Unsigned subtraction handles epoch wrap-around: any
		 * forward step gives a positive delta.
		 */
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			/* had R only: any epoch advance (or an upgrade that
			 * skipped an epoch) may hide writes we did not see */
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			/* had RH: same reasoning, one epoch of slack when
			 * upgrading to RHW */
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
3371
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003372static bool
3373smb2_is_read_op(__u32 oplock)
3374{
3375 return oplock == SMB2_OPLOCK_LEVEL_II;
3376}
3377
3378static bool
3379smb21_is_read_op(__u32 oplock)
3380{
3381 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3382 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3383}
3384
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003385static __le32
3386map_oplock_to_lease(u8 oplock)
3387{
3388 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3389 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3390 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3391 return SMB2_LEASE_READ_CACHING;
3392 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3393 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3394 SMB2_LEASE_WRITE_CACHING;
3395 return 0;
3396}
3397
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003398static char *
3399smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3400{
3401 struct create_lease *buf;
3402
3403 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3404 if (!buf)
3405 return NULL;
3406
Stefano Brivio729c0c92018-07-05 15:10:02 +02003407 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003408 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003409
3410 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3411 (struct create_lease, lcontext));
3412 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3413 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3414 (struct create_lease, Name));
3415 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003416 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003417 buf->Name[0] = 'R';
3418 buf->Name[1] = 'q';
3419 buf->Name[2] = 'L';
3420 buf->Name[3] = 's';
3421 return (char *)buf;
3422}
3423
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003424static char *
3425smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3426{
3427 struct create_lease_v2 *buf;
3428
3429 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3430 if (!buf)
3431 return NULL;
3432
Stefano Brivio729c0c92018-07-05 15:10:02 +02003433 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003434 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3435
3436 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3437 (struct create_lease_v2, lcontext));
3438 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3439 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3440 (struct create_lease_v2, Name));
3441 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003442 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003443 buf->Name[0] = 'R';
3444 buf->Name[1] = 'q';
3445 buf->Name[2] = 'L';
3446 buf->Name[3] = 's';
3447 return (char *)buf;
3448}
3449
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003450static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003451smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003452{
3453 struct create_lease *lc = (struct create_lease *)buf;
3454
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003455 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003456 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3457 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3458 return le32_to_cpu(lc->lcontext.LeaseState);
3459}
3460
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003461static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003462smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003463{
3464 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3465
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003466 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003467 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3468 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003469 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003470 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003471 return le32_to_cpu(lc->lcontext.LeaseState);
3472}
3473
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003474static unsigned int
3475smb2_wp_retry_size(struct inode *inode)
3476{
3477 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3478 SMB2_MAX_BUFFER_SIZE);
3479}
3480
Pavel Shilovsky52755802014-08-18 20:49:57 +04003481static bool
3482smb2_dir_needs_close(struct cifsFileInfo *cfile)
3483{
3484 return !cfile->invalidHandle;
3485}
3486
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003487static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003488fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05003489 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003490{
3491 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003492 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003493
3494 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3495 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3496 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3497 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French2b2f7542019-06-07 15:16:10 -05003498 if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3499 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3500 else
3501 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003502 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003503}
3504
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	struct page *pg;

	/*
	 * VMAP_STACK (at least) puts stack into the vmalloc address space,
	 * so the page must be resolved via vmalloc_to_page() there.
	 */
	pg = is_vmalloc_addr(buf) ? vmalloc_to_page(buf) : virt_to_page(buf);
	sg_set_page(sg, pg, buflen, offset_in_page(buf));
}
3521
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0]  is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds the scatterlist handed to the AEAD: every iov and page of every
 * request in order, followed by one final entry for the signature buffer
 * @sign.  Returns a kmalloc'd scatterlist (caller frees) or NULL on
 * allocation failure.
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and per page, plus one for the signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* final entry receives (or supplies) the AEAD signature/tag */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
3569
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003570static int
3571smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3572{
3573 struct cifs_ses *ses;
3574 u8 *ses_enc_key;
3575
3576 spin_lock(&cifs_tcp_ses_lock);
3577 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3578 if (ses->Suid != ses_id)
3579 continue;
3580 ses_enc_key = enc ? ses->smb3encryptionkey :
3581 ses->smb3decryptionkey;
3582 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3583 spin_unlock(&cifs_tcp_ses_lock);
3584 return 0;
3585 }
3586 spin_unlock(&cifs_tcp_ses_lock);
3587
3588 return 1;
3589}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003590/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003591 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3592 * iov[0] - transform header (associate data),
3593 * iov[1-N] - SMB2 header and pages - data to encrypt.
3594 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003595 * untouched.
3596 */
3597static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003598crypt_message(struct TCP_Server_Info *server, int num_rqst,
3599 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003600{
3601 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003602 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003603 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003604 int rc = 0;
3605 struct scatterlist *sg;
3606 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003607 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003608 struct aead_request *req;
3609 char *iv;
3610 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003611 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003612 struct crypto_aead *tfm;
3613 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3614
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003615 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3616 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003617 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003618 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003619 return 0;
3620 }
3621
3622 rc = smb3_crypto_aead_allocate(server);
3623 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003624 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003625 return rc;
3626 }
3627
3628 tfm = enc ? server->secmech.ccmaesencrypt :
3629 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003630 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003631 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003632 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003633 return rc;
3634 }
3635
3636 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3637 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003638 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003639 return rc;
3640 }
3641
3642 req = aead_request_alloc(tfm, GFP_KERNEL);
3643 if (!req) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003644 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003645 return -ENOMEM;
3646 }
3647
3648 if (!enc) {
3649 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3650 crypt_len += SMB2_SIGNATURE_SIZE;
3651 }
3652
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003653 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003654 if (!sg) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003655 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003656 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003657 goto free_req;
3658 }
3659
3660 iv_len = crypto_aead_ivsize(tfm);
3661 iv = kzalloc(iv_len, GFP_KERNEL);
3662 if (!iv) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003663 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003664 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003665 goto free_sg;
3666 }
Steve French2b2f7542019-06-07 15:16:10 -05003667
3668 if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3669 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3670 else {
3671 iv[0] = 3;
3672 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
3673 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003674
3675 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3676 aead_request_set_ad(req, assoc_data_len);
3677
3678 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003679 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003680
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003681 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3682 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003683
3684 if (!rc && enc)
3685 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3686
3687 kfree(iv);
3688free_sg:
3689 kfree(sg);
3690free_req:
3691 kfree(req);
3692 return rc;
3693}
3694
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003695void
3696smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003697{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003698 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003699
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003700 for (i = 0; i < num_rqst; i++) {
3701 if (rqst[i].rq_pages) {
3702 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3703 put_page(rqst[i].rq_pages[j]);
3704 kfree(rqst[i].rq_pages);
3705 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003706 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003707}
3708
/*
 * This function will initialize new_rq and encrypt the content.
 * The first entry, new_rq[0], only contains a single iov which contains
 * a smb2_transform_hdr and is pre-allocated by the caller.
 * This function then populates new_rq[1+] with the content from olq_rq[0+].
 *
 * The end result is an array of smb_rqst structures where the first structure
 * only contains a single iov for the transform header which we then can pass
 * to crypt_message().
 *
 * new_rq[0].rq_iov[0] :  smb2_transform_hdr pre-allocated by the caller
 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
 *
 * Returns 0 on success or a negative errno; on failure any pages already
 * allocated into new_rq are released via smb3_free_compound_rqst().
 */
static int
smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
	struct page **pages;
	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
	unsigned int npages;
	unsigned int orig_len = 0;
	int i, j;
	int rc = -ENOMEM;

	for (i = 1; i < num_rqst; i++) {
		npages = old_rq[i - 1].rq_npages;
		pages = kmalloc_array(npages, sizeof(struct page *),
				      GFP_KERNEL);
		if (!pages)
			goto err_free;

		/* iovs are shared with the old request; pages are copied
		 * so encryption does not clobber the original data */
		new_rq[i].rq_pages = pages;
		new_rq[i].rq_npages = npages;
		new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
		new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
		new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
		new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
		new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;

		/* total plaintext length, recorded in the transform header */
		orig_len += smb_rqst_len(server, &old_rq[i - 1]);

		for (j = 0; j < npages; j++) {
			pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
			if (!pages[j])
				goto err_free;
		}

		/* copy page data from the old request into the new pages */
		for (j = 0; j < npages; j++) {
			char *dst, *src;
			unsigned int offset, len;

			rqst_page_get_length(&new_rq[i], j, &len, &offset);

			dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
			src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;

			memcpy(dst, src, len);
			kunmap(new_rq[i].rq_pages[j]);
			kunmap(old_rq[i - 1].rq_pages[j]);
		}
	}

	/* fill the 1st iov with a transform header */
	fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);

	rc = crypt_message(server, num_rqst, new_rq, 1);
	cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
	if (rc)
		goto err_free;

	return rc;

err_free:
	smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
	return rc;
}
3786
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003787static int
3788smb3_is_transform_hdr(void *buf)
3789{
3790 struct smb2_transform_hdr *trhdr = buf;
3791
3792 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3793}
3794
3795static int
3796decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3797 unsigned int buf_data_size, struct page **pages,
3798 unsigned int npages, unsigned int page_data_size)
3799{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003800 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003801 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003802 int rc;
3803
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003804 iov[0].iov_base = buf;
3805 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3806 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3807 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003808
3809 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003810 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003811 rqst.rq_pages = pages;
3812 rqst.rq_npages = npages;
3813 rqst.rq_pagesz = PAGE_SIZE;
3814 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3815
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003816 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02003817 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003818
3819 if (rc)
3820 return rc;
3821
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003822 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003823
3824 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003825
3826 return rc;
3827}
3828
3829static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003830read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3831 unsigned int npages, unsigned int len)
3832{
3833 int i;
3834 int length;
3835
3836 for (i = 0; i < npages; i++) {
3837 struct page *page = pages[i];
3838 size_t n;
3839
3840 n = len;
3841 if (len >= PAGE_SIZE) {
3842 /* enough data to fill the page */
3843 n = PAGE_SIZE;
3844 len -= n;
3845 } else {
3846 zero_user(page, len, PAGE_SIZE - len);
3847 len = 0;
3848 }
Long Li1dbe3462018-05-30 12:47:55 -07003849 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003850 if (length < 0)
3851 return length;
3852 server->total_read += length;
3853 }
3854
3855 return 0;
3856}
3857
3858static int
3859init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3860 unsigned int cur_off, struct bio_vec **page_vec)
3861{
3862 struct bio_vec *bvec;
3863 int i;
3864
3865 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3866 if (!bvec)
3867 return -ENOMEM;
3868
3869 for (i = 0; i < npages; i++) {
3870 bvec[i].bv_page = pages[i];
3871 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3872 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3873 data_size -= bvec[i].bv_len;
3874 }
3875
3876 if (data_size != 0) {
3877 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3878 kfree(bvec);
3879 return -EIO;
3880 }
3881
3882 *page_vec = bvec;
3883 return 0;
3884}
3885
/*
 * Process a (possibly decrypted) SMB2 READ response for the mid's readdata.
 *
 * The response header (and possibly payload) is in @buf (@buf_len bytes);
 * for large reads the payload instead sits in @pages (@npages pages,
 * @page_data_size bytes).  The payload must be entirely in one or the
 * other — a split is treated as a malformed frame.
 *
 * Returns 0 after dequeueing the mid (rdata->result carries any error),
 * a negative value for transport-level failures, or the number of bytes
 * copied into the readdata's pages on success.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	/* this path only handles READ responses (the big-response path) */
	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server);
		wake_up(&server->response_q);
		return -1;
	}

	/* STATUS_PENDING: server will send the real response later */
	if (server->ops->is_status_pending &&
			server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* padding between the response header and the payload start */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* hand the payload (via iter) to the readdata's copy routine */
	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	dequeue_mid(mid, false);
	return length;
}
4019
Steve French35cf94a2019-09-07 01:09:49 -05004020struct smb2_decrypt_work {
4021 struct work_struct decrypt;
4022 struct TCP_Server_Info *server;
4023 struct page **ppages;
4024 char *buf;
4025 unsigned int npages;
4026 unsigned int len;
4027};
4028
4029
4030static void smb2_decrypt_offload(struct work_struct *work)
4031{
4032 struct smb2_decrypt_work *dw = container_of(work,
4033 struct smb2_decrypt_work, decrypt);
4034 int i, rc;
4035 struct mid_q_entry *mid;
4036
4037 rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
4038 dw->ppages, dw->npages, dw->len);
4039 if (rc) {
4040 cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
4041 goto free_pages;
4042 }
4043
4044 mid = smb2_find_mid(dw->server, dw->buf);
4045 if (mid == NULL)
4046 cifs_dbg(FYI, "mid not found\n");
4047 else {
4048 mid->decrypted = true;
4049 rc = handle_read_data(dw->server, mid, dw->buf,
4050 dw->server->vals->read_rsp_size,
4051 dw->ppages, dw->npages, dw->len);
4052 }
4053
4054 dw->server->lstrp = jiffies;
4055
4056 mid->callback(mid);
4057
4058 cifs_mid_q_entry_release(mid);
4059
4060free_pages:
4061 for (i = dw->npages-1; i >= 0; i--)
4062 put_page(dw->ppages[i]);
4063
4064 kfree(dw->ppages);
4065 cifs_small_buf_release(dw->buf);
4066}
4067
4068
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004069static int
Steve French35cf94a2019-09-07 01:09:49 -05004070receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
4071 int *num_mids)
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004072{
4073 char *buf = server->smallbuf;
4074 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4075 unsigned int npages;
4076 struct page **pages;
4077 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004078 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004079 int rc;
4080 int i = 0;
Steve French35cf94a2019-09-07 01:09:49 -05004081 struct smb2_decrypt_work *dw;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004082
Steve French35cf94a2019-09-07 01:09:49 -05004083 *num_mids = 1;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004084 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004085 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4086
4087 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4088 if (rc < 0)
4089 return rc;
4090 server->total_read += rc;
4091
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004092 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11004093 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004094 npages = DIV_ROUND_UP(len, PAGE_SIZE);
4095
4096 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
4097 if (!pages) {
4098 rc = -ENOMEM;
4099 goto discard_data;
4100 }
4101
4102 for (; i < npages; i++) {
4103 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4104 if (!pages[i]) {
4105 rc = -ENOMEM;
4106 goto discard_data;
4107 }
4108 }
4109
4110 /* read read data into pages */
4111 rc = read_data_into_pages(server, pages, npages, len);
4112 if (rc)
4113 goto free_pages;
4114
Pavel Shilovsky350be252017-04-10 10:31:33 -07004115 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004116 if (rc)
4117 goto free_pages;
4118
Steve French35cf94a2019-09-07 01:09:49 -05004119 /*
4120 * For large reads, offload to different thread for better performance,
4121 * use more cores decrypting which can be expensive
4122 */
4123
4124 /* TODO: make the size limit to enable decrypt offload configurable */
4125 if (server->pdu_size > (512 * 1024)) {
4126 dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
4127 if (dw == NULL)
4128 goto non_offloaded_decrypt;
4129
4130 dw->buf = server->smallbuf;
4131 server->smallbuf = (char *)cifs_small_buf_get();
4132
4133 INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
4134
4135 dw->npages = npages;
4136 dw->server = server;
4137 dw->ppages = pages;
4138 dw->len = len;
4139 queue_work(cifsiod_wq, &dw->decrypt);
4140 *num_mids = 0; /* worker thread takes care of finding mid */
4141 return -1;
4142 }
4143
4144non_offloaded_decrypt:
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004145 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004146 pages, npages, len);
4147 if (rc)
4148 goto free_pages;
4149
4150 *mid = smb2_find_mid(server, buf);
4151 if (*mid == NULL)
4152 cifs_dbg(FYI, "mid not found\n");
4153 else {
4154 cifs_dbg(FYI, "mid found\n");
4155 (*mid)->decrypted = true;
4156 rc = handle_read_data(server, *mid, buf,
4157 server->vals->read_rsp_size,
4158 pages, npages, len);
4159 }
4160
4161free_pages:
4162 for (i = i - 1; i >= 0; i--)
4163 put_page(pages[i]);
4164 kfree(pages);
4165 return rc;
4166discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07004167 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004168 goto free_pages;
4169}
4170
4171static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004172receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004173 struct mid_q_entry **mids, char **bufs,
4174 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004175{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004176 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004177 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004178 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004179 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004180 unsigned int buf_size;
4181 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004182 int next_is_large;
4183 char *next_buffer = NULL;
4184
4185 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004186
4187 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004188 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004189 server->large_buf = true;
4190 memcpy(server->bigbuf, buf, server->total_read);
4191 buf = server->bigbuf;
4192 }
4193
4194 /* now read the rest */
4195 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004196 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004197 if (length < 0)
4198 return length;
4199 server->total_read += length;
4200
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004201 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004202 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
4203 if (length)
4204 return length;
4205
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004206 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004207one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004208 shdr = (struct smb2_sync_hdr *)buf;
4209 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004210 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004211 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004212 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004213 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004214 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004215 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004216 pdu_length - le32_to_cpu(shdr->NextCommand));
4217 }
4218
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004219 mid_entry = smb2_find_mid(server, buf);
4220 if (mid_entry == NULL)
4221 cifs_dbg(FYI, "mid not found\n");
4222 else {
4223 cifs_dbg(FYI, "mid found\n");
4224 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004225 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004226 }
4227
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004228 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004229 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004230 return -1;
4231 }
4232 bufs[*num_mids] = buf;
4233 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004234
4235 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004236 ret = mid_entry->handle(server, mid_entry);
4237 else
4238 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004239
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004240 if (ret == 0 && shdr->NextCommand) {
4241 pdu_length -= le32_to_cpu(shdr->NextCommand);
4242 server->large_buf = next_is_large;
4243 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004244 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004245 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004246 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004247 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004248 } else if (ret != 0) {
4249 /*
4250 * ret != 0 here means that we didn't get to handle_mid() thus
4251 * server->smallbuf and server->bigbuf are still valid. We need
4252 * to free next_buffer because it is not going to be used
4253 * anywhere.
4254 */
4255 if (next_is_large)
4256 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4257 else
4258 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004259 }
4260
4261 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004262}
4263
4264static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004265smb3_receive_transform(struct TCP_Server_Info *server,
4266 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004267{
4268 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004269 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004270 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4271 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4272
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004273 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004274 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004275 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004276 pdu_length);
4277 cifs_reconnect(server);
4278 wake_up(&server->response_q);
4279 return -ECONNABORTED;
4280 }
4281
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004282 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004283 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004284 cifs_reconnect(server);
4285 wake_up(&server->response_q);
4286 return -ECONNABORTED;
4287 }
4288
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004289 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004290 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004291 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004292 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004293
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004294 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004295}
4296
4297int
4298smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4299{
4300 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4301
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004302 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004303 NULL, 0, 0);
4304}
4305
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004306static int
4307smb2_next_header(char *buf)
4308{
4309 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4310 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4311
4312 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4313 return sizeof(struct smb2_transform_hdr) +
4314 le32_to_cpu(t_hdr->OriginalMessageSize);
4315
4316 return le32_to_cpu(hdr->NextCommand);
4317}
4318
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004319static int
4320smb2_make_node(unsigned int xid, struct inode *inode,
4321 struct dentry *dentry, struct cifs_tcon *tcon,
4322 char *full_path, umode_t mode, dev_t dev)
4323{
4324 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4325 int rc = -EPERM;
4326 int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
4327 FILE_ALL_INFO *buf = NULL;
4328 struct cifs_io_parms io_parms;
4329 __u32 oplock = 0;
4330 struct cifs_fid fid;
4331 struct cifs_open_parms oparms;
4332 unsigned int bytes_written;
4333 struct win_dev *pdev;
4334 struct kvec iov[2];
4335
4336 /*
4337 * Check if mounted with mount parm 'sfu' mount parm.
4338 * SFU emulation should work with all servers, but only
4339 * supports block and char device (no socket & fifo),
4340 * and was used by default in earlier versions of Windows
4341 */
4342 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
4343 goto out;
4344
4345 /*
4346 * TODO: Add ability to create instead via reparse point. Windows (e.g.
4347 * their current NFS server) uses this approach to expose special files
4348 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
4349 */
4350
4351 if (!S_ISCHR(mode) && !S_ISBLK(mode))
4352 goto out;
4353
4354 cifs_dbg(FYI, "sfu compat create special file\n");
4355
4356 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
4357 if (buf == NULL) {
4358 rc = -ENOMEM;
4359 goto out;
4360 }
4361
4362 if (backup_cred(cifs_sb))
4363 create_options |= CREATE_OPEN_BACKUP_INTENT;
4364
4365 oparms.tcon = tcon;
4366 oparms.cifs_sb = cifs_sb;
4367 oparms.desired_access = GENERIC_WRITE;
4368 oparms.create_options = create_options;
4369 oparms.disposition = FILE_CREATE;
4370 oparms.path = full_path;
4371 oparms.fid = &fid;
4372 oparms.reconnect = false;
4373
4374 if (tcon->ses->server->oplocks)
4375 oplock = REQ_OPLOCK;
4376 else
4377 oplock = 0;
4378 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
4379 if (rc)
4380 goto out;
4381
4382 /*
4383 * BB Do not bother to decode buf since no local inode yet to put
4384 * timestamps in, but we can reuse it safely.
4385 */
4386
4387 pdev = (struct win_dev *)buf;
4388 io_parms.pid = current->tgid;
4389 io_parms.tcon = tcon;
4390 io_parms.offset = 0;
4391 io_parms.length = sizeof(struct win_dev);
4392 iov[1].iov_base = buf;
4393 iov[1].iov_len = sizeof(struct win_dev);
4394 if (S_ISCHR(mode)) {
4395 memcpy(pdev->type, "IntxCHR", 8);
4396 pdev->major = cpu_to_le64(MAJOR(dev));
4397 pdev->minor = cpu_to_le64(MINOR(dev));
4398 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4399 &bytes_written, iov, 1);
4400 } else if (S_ISBLK(mode)) {
4401 memcpy(pdev->type, "IntxBLK", 8);
4402 pdev->major = cpu_to_le64(MAJOR(dev));
4403 pdev->minor = cpu_to_le64(MINOR(dev));
4404 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4405 &bytes_written, iov, 1);
4406 }
4407 tcon->ses->server->ops->close(xid, tcon, &fid);
4408 d_drop(dentry);
4409
4410 /* FIXME: add code here to set EAs */
4411out:
4412 kfree(buf);
4413 return rc;
4414}
4415
4416
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004417struct smb_version_operations smb20_operations = {
4418 .compare_fids = smb2_compare_fids,
4419 .setup_request = smb2_setup_request,
4420 .setup_async_request = smb2_setup_async_request,
4421 .check_receive = smb2_check_receive,
4422 .add_credits = smb2_add_credits,
4423 .set_credits = smb2_set_credits,
4424 .get_credits_field = smb2_get_credits_field,
4425 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004426 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004427 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004428 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004429 .read_data_offset = smb2_read_data_offset,
4430 .read_data_length = smb2_read_data_length,
4431 .map_error = map_smb2_to_linux_error,
4432 .find_mid = smb2_find_mid,
4433 .check_message = smb2_check_message,
4434 .dump_detail = smb2_dump_detail,
4435 .clear_stats = smb2_clear_stats,
4436 .print_stats = smb2_print_stats,
4437 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004438 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004439 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004440 .need_neg = smb2_need_neg,
4441 .negotiate = smb2_negotiate,
4442 .negotiate_wsize = smb2_negotiate_wsize,
4443 .negotiate_rsize = smb2_negotiate_rsize,
4444 .sess_setup = SMB2_sess_setup,
4445 .logoff = SMB2_logoff,
4446 .tree_connect = SMB2_tcon,
4447 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004448 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004449 .is_path_accessible = smb2_is_path_accessible,
4450 .can_echo = smb2_can_echo,
4451 .echo = SMB2_echo,
4452 .query_path_info = smb2_query_path_info,
4453 .get_srv_inum = smb2_get_srv_inum,
4454 .query_file_info = smb2_query_file_info,
4455 .set_path_size = smb2_set_path_size,
4456 .set_file_size = smb2_set_file_size,
4457 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004458 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004459 .mkdir = smb2_mkdir,
4460 .mkdir_setinfo = smb2_mkdir_setinfo,
4461 .rmdir = smb2_rmdir,
4462 .unlink = smb2_unlink,
4463 .rename = smb2_rename_path,
4464 .create_hardlink = smb2_create_hardlink,
4465 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01004466 .query_mf_symlink = smb3_query_mf_symlink,
4467 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004468 .open = smb2_open_file,
4469 .set_fid = smb2_set_fid,
4470 .close = smb2_close_file,
4471 .flush = smb2_flush_file,
4472 .async_readv = smb2_async_readv,
4473 .async_writev = smb2_async_writev,
4474 .sync_read = smb2_sync_read,
4475 .sync_write = smb2_sync_write,
4476 .query_dir_first = smb2_query_dir_first,
4477 .query_dir_next = smb2_query_dir_next,
4478 .close_dir = smb2_close_dir,
4479 .calc_smb_size = smb2_calc_size,
4480 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004481 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004482 .oplock_response = smb2_oplock_response,
4483 .queryfs = smb2_queryfs,
4484 .mand_lock = smb2_mand_lock,
4485 .mand_unlock_range = smb2_unlock_range,
4486 .push_mand_locks = smb2_push_mandatory_locks,
4487 .get_lease_key = smb2_get_lease_key,
4488 .set_lease_key = smb2_set_lease_key,
4489 .new_lease_key = smb2_new_lease_key,
4490 .calc_signature = smb2_calc_signature,
4491 .is_read_op = smb2_is_read_op,
4492 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004493 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004494 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004495 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004496 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004497 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004498 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304499 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004500#ifdef CONFIG_CIFS_XATTR
4501 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004502 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004503#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004504 .get_acl = get_smb2_acl,
4505 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004506 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004507 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004508 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004509 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004510 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004511 .llseek = smb3_llseek,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004512};
4513
Steve French1080ef72011-02-24 18:07:19 +00004514struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004515 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004516 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04004517 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004518 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04004519 .add_credits = smb2_add_credits,
4520 .set_credits = smb2_set_credits,
4521 .get_credits_field = smb2_get_credits_field,
4522 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004523 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004524 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004525 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004526 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004527 .read_data_offset = smb2_read_data_offset,
4528 .read_data_length = smb2_read_data_length,
4529 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004530 .find_mid = smb2_find_mid,
4531 .check_message = smb2_check_message,
4532 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04004533 .clear_stats = smb2_clear_stats,
4534 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004535 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004536 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004537 .downgrade_oplock = smb21_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04004538 .need_neg = smb2_need_neg,
4539 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07004540 .negotiate_wsize = smb2_negotiate_wsize,
4541 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04004542 .sess_setup = SMB2_sess_setup,
4543 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04004544 .tree_connect = SMB2_tcon,
4545 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004546 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04004547 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04004548 .can_echo = smb2_can_echo,
4549 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04004550 .query_path_info = smb2_query_path_info,
4551 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07004552 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004553 .set_path_size = smb2_set_path_size,
4554 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004555 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004556 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04004557 .mkdir = smb2_mkdir,
4558 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04004559 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07004560 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004561 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07004562 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004563 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004564 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004565 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07004566 .open = smb2_open_file,
4567 .set_fid = smb2_set_fid,
4568 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07004569 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004570 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07004571 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004572 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004573 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004574 .query_dir_first = smb2_query_dir_first,
4575 .query_dir_next = smb2_query_dir_next,
4576 .close_dir = smb2_close_dir,
4577 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07004578 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004579 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004580 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07004581 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07004582 .mand_lock = smb2_mand_lock,
4583 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07004584 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07004585 .get_lease_key = smb2_get_lease_key,
4586 .set_lease_key = smb2_set_lease_key,
4587 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06004588 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004589 .is_read_op = smb21_is_read_op,
4590 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004591 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004592 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004593 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004594 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004595 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05004596 .enum_snapshots = smb3_enum_snapshots,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004597 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304598 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004599#ifdef CONFIG_CIFS_XATTR
4600 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004601 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004602#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004603 .get_acl = get_smb2_acl,
4604 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004605 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004606 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004607 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004608 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004609 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004610 .llseek = smb3_llseek,
Steve French38107d42012-12-08 22:08:06 -06004611};
4612
/*
 * Dispatch table for the SMB 3.0 dialect: maps generic cifs.ko operations
 * to the SMB2/SMB3 implementations used when this dialect is negotiated.
 * Entries differ from earlier dialect tables where SMB3 adds capability
 * (e.g. smb3_* signing, encryption transform, lease v2 buffers).
 */
struct smb_version_operations smb30_operations = {
	/* request setup / transport bookkeeping */
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	/* credit management */
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	/* response parsing / diagnostics */
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	/* negotiate / session / tree connect */
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	/* path and file metadata operations */
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	/* open/close and data path */
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	/* directory enumeration */
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	/* byte-range locking */
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	/* leases and signing (SMB3-specific key derivation and signature) */
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb30signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	/* server-side copy and ioctl-backed features */
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	/* SMB3 encryption (transform header) support */
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
4720
/*
 * Dispatch table for the SMB 3.1.1 dialect.  Largely mirrors
 * smb30_operations but uses 3.1.1-specific entries where behavior
 * differs (signing key derivation, POSIX mkdir, queryfs).
 */
struct smb_version_operations smb311_operations = {
	/* request setup / transport bookkeeping */
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	/* credit management */
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	/* response parsing / diagnostics */
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb21_downgrade_oplock,
	/* negotiate / session / tree connect */
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	/* path and file metadata operations */
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	/* 3.1.1 only: POSIX extensions mkdir */
	.posix_mkdir = smb311_posix_mkdir,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	/* open/close and data path */
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	/* directory enumeration */
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	/* 3.1.1 only: dialect-specific statfs */
	.queryfs = smb311_queryfs,
	/* byte-range locking */
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	/* leases and signing (3.1.1 key derivation differs from 3.0) */
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb311signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	/* server-side copy and ioctl-backed features */
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	/* SMB3 encryption (transform header) support */
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
Steve Frenchaab18932015-06-23 23:37:11 -05004829
/*
 * Protocol constants for the SMB 2.0 dialect: wire identifiers, lock
 * flags, header sizes, and capability bits advertised at negotiate.
 */
struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	/* SMB2.0/2.1 use the original (v1) create lease context */
	.create_lease_size = sizeof(struct create_lease),
};
4850
/*
 * Protocol constants for the SMB 2.1 dialect.  Identical in shape to
 * smb20_values except for the dialect identifier strings/ids.
 */
struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	/* v1 create lease context (v2 starts with SMB3 dialects) */
	.create_lease_size = sizeof(struct create_lease),
};
4871
/*
 * Protocol constants used when the mount requests "any SMB3 dialect":
 * the client sends a protocol array at negotiate, so .protocol_id here
 * is a placeholder (see comment on that field).
 */
struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	/* full SMB3 capability set requested at negotiate */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
4892
/*
 * Protocol constants for the default dialect selection (no explicit
 * vers= mount option): a protocol array is sent at negotiate, so
 * .protocol_id is a placeholder here as well.
 */
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	/* full SMB3 capability set requested at negotiate */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
4913
/*
 * Protocol constants for the SMB 3.0 dialect.
 */
struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	/* full SMB3 capability set requested at negotiate */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	/* SMB3 dialects use the v2 create lease context */
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French20b6d8b2013-06-12 22:48:41 -05004934
/*
 * Protocol constants for the SMB 3.0.2 dialect.
 */
struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	/* full SMB3 capability set requested at negotiate */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French5f7fbf72014-12-17 22:52:58 -06004955
/*
 * Protocol constants for the SMB 3.1.1 dialect.
 */
struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	/* full SMB3 capability set requested at negotiate */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};