// SPDX-License-Identifier: GPL-2.0
/*
 * SMB2 version specific operations
 *
 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
 */

#include <linux/pagemap.h>
#include <linux/vfs.h>
#include <linux/falloc.h>
#include <linux/scatterlist.h>
#include <linux/uuid.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
#include "smbdirect.h"

/* Change credits for different ops and return the total number of credits */
static int
change_conf(struct TCP_Server_Info *server)
{
	server->credits += server->echo_credits + server->oplock_credits;
	server->oplock_credits = server->echo_credits = 0;
	switch (server->credits) {
	case 0:
		return 0;
	case 1:
		server->echoes = false;
		server->oplocks = false;
		break;
	case 2:
		server->echoes = true;
		server->oplocks = false;
		server->echo_credits = 1;
		break;
	default:
		server->echoes = true;
		if (enable_oplocks) {
			server->oplocks = true;
			server->oplock_credits = 1;
		} else
			server->oplocks = false;

		server->echo_credits = 1;
	}
	server->credits -= server->echo_credits + server->oplock_credits;
	return server->credits + server->echo_credits + server->oplock_credits;
}

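/*
 * Return credits granted by a response to the server's pool.  Rebalances
 * echo/oplock credits via change_conf() once nothing is left in flight,
 * drops credits that belong to a previous reconnect instance, and logs
 * the resulting state.
 */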
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* e.g. found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			 add, instance);

	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}

static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
	spin_lock(&server->req_lock);
	server->credits = val;
	if (val == 1)
		server->reconnect_instance++;
	spin_unlock(&server->req_lock);
	/* don't log while holding the lock */
	if (val == 1)
		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}

static int *
smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
	switch (optype) {
	case CIFS_ECHO_OP:
		return &server->echo_credits;
	case CIFS_OBREAK_OP:
		return &server->oplock_credits;
	default:
		return &server->credits;
	}
}

static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;

	if (mid->mid_state == MID_RESPONSE_RECEIVED
	    || mid->mid_state == MID_RESPONSE_MALFORMED)
		return le16_to_cpu(shdr->CreditRequest);

	return 0;
}

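/*
 * Block until credits are available for a multi-credit (MTU) request of
 * @size bytes.  On success *num holds the number of bytes that may be sent
 * and *credits records the credits charged and the reconnect instance they
 * were taken from.  If only a few (<= 8) credits remain, fall back to a
 * single SMB2_MAX_BUFFER_SIZE request without charging any credits so that
 * reopen and other ops cannot be starved.
 */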
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
				has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			if (server->in_flight > server->max_in_flight)
				server->max_in_flight = server->in_flight;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}

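/*
 * Trim the credits charged to a request down to what @payload_size actually
 * needs and return the excess to the server's pool.  Fails if the request
 * holds fewer credits than required, or with -EAGAIN if the server has been
 * reconnected since the credits were obtained.
 */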
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		WARN_ONCE(1, "request has less credits (%d) than required (%d)",
			  credits->value, new_val);
		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		spin_unlock(&server->req_lock);
		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
			 credits->value - new_val);
		return -EAGAIN;
	}

	server->credits += credits->value - new_val;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	credits->value = new_val;
	return 0;
}

static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
	__u64 mid;
	/* for SMB2 we need the current value */
	spin_lock(&GlobalMid_Lock);
	mid = server->CurrentMid++;
	spin_unlock(&GlobalMid_Lock);
	return mid;
}

static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
	spin_lock(&GlobalMid_Lock);
	if (server->CurrentMid >= val)
		server->CurrentMid -= val;
	spin_unlock(&GlobalMid_Lock);
}

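/*
 * Find the pending mid_q_entry whose mid and command match the SMB2 header
 * in @buf and return it with an extra reference held.  Encrypted (transform)
 * frames and mids that are not in MID_REQUEST_SUBMITTED state are not
 * matched.
 */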
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}

static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}

static bool
smb2_need_neg(struct TCP_Server_Info *server)
{
	return server->max_read == 0;
}

static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;

	ses->server->CurrentMid = 0;
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}

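/*
 * Pick the write size for an SMB2 mount: the user-specified (or default)
 * wsize clamped to the server's max_write, to the SMB Direct limits when
 * RDMA is in use, and to SMB2_MAX_BUFFER_SIZE if the server did not
 * negotiate large MTU support.  The smb3 and rsize variants below follow
 * the same pattern.
 */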
static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}

static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}

static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}

static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}

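/*
 * Parse an FSCTL_QUERY_NETWORK_INTERFACE_INFO response into a newly
 * allocated array of struct cifs_server_iface.  The buffer is walked twice:
 * first to count and sanity check the entries, then to copy the link speed,
 * capability bits and socket address of every IPv4/IPv6 interface.  On
 * success the caller owns *iface_list.
 */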
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
			size_t buf_len,
			struct cifs_server_iface **iface_list,
			size_t *iface_count)
{
	struct network_interface_info_ioctl_rsp *p;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	struct iface_info_ipv4 *p4;
	struct iface_info_ipv6 *p6;
	struct cifs_server_iface *info;
	ssize_t bytes_left;
	size_t next = 0;
	int nb_iface = 0;
	int rc = 0;

	*iface_list = NULL;
	*iface_count = 0;

	/*
	 * First pass: count and sanity check
	 */

	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		nb_iface++;
		next = le32_to_cpu(p->Next);
		if (!next) {
			bytes_left -= sizeof(*p);
			break;
		}
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!nb_iface) {
		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	if (bytes_left || p->Next)
		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);


	/*
	 * Second pass: extract info to internal structure
	 */

	*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
	if (!*iface_list) {
		rc = -ENOMEM;
		goto out;
	}

	info = *iface_list;
	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		info->speed = le64_to_cpu(p->LinkSpeed);
		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);

		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
			 le32_to_cpu(p->Capability));

		switch (p->Family) {
		/*
		 * The kernel and wire socket structures have the same
		 * layout and use network byte order but make the
		 * conversion explicit in case either one changes.
		 */
		case INTERNETWORK:
			addr4 = (struct sockaddr_in *)&info->sockaddr;
			p4 = (struct iface_info_ipv4 *)p->Buffer;
			addr4->sin_family = AF_INET;
			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
			addr4->sin_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
				 &addr4->sin_addr);
			break;
		case INTERNETWORKV6:
			addr6 = (struct sockaddr_in6 *)&info->sockaddr;
			p6 = (struct iface_info_ipv6 *)p->Buffer;
			addr6->sin6_family = AF_INET6;
			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
			addr6->sin6_flowinfo = 0;
			addr6->sin6_scope_id = 0;
			addr6->sin6_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
				 &addr6->sin6_addr);
			break;
		default:
			cifs_dbg(VFS,
				 "%s: skipping unsupported socket family\n",
				 __func__);
			goto next_iface;
		}

		(*iface_count)++;
		info++;
next_iface:
		next = le32_to_cpu(p->Next);
		if (!next)
			break;
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!*iface_count) {
		rc = -EINVAL;
		goto out;
	}

out:
	if (rc) {
		kfree(*iface_list);
		*iface_count = 0;
		*iface_list = NULL;
	}
	return rc;
}


static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}

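/*
 * kref release callback for the cached root handle: close the handle on the
 * server and mark the cached fid (and its cached FILE_ALL_INFORMATION copy)
 * invalid.  Invoked via kref_put() from close_shroot().
 */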
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
	}
}

void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}

void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
			struct cached_fid, lease_break);

	close_shroot(cfid);
}

/*
 * Open the directory at the root of a share
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = 0;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		kref_get(&tcon->crfid.refcount);
		smb2_parse_contexts(server, o_rsp,
				&oparms.fid->epoch,
				oparms.fid->lease_key, &oplock, NULL);
	} else
		goto oshr_exit;

	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = 1;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}

static void
smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	bool no_cached_open = tcon->nohandlecache;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (no_cached_open)
		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
			       NULL);
	else
		rc = open_shroot(xid, tcon, &fid);

	if (rc)
		return;

	SMB3_request_interfaces(xid, tcon);

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_VOLUME_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
	if (no_cached_open)
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	else
		close_shroot(&tcon->crfid);
}

static void
smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
	if (rc)
		return;

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}

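/*
 * Check that @full_path exists by opening and immediately closing it with
 * FILE_READ_ATTRIBUTES access.  The share root is considered accessible
 * without a round trip whenever a valid cached root handle is held.
 */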
static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, const char *full_path)
{
	int rc;
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	if ((*full_path == 0) && tcon->crfid.is_valid)
		return 0;

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	if (rc) {
		kfree(utf16_path);
		return rc;
	}

	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	kfree(utf16_path);
	return rc;
}

static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}

static int
smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
		     struct cifs_fid *fid, FILE_ALL_INFO *data)
{
	int rc;
	struct smb2_file_all_info *smb2_data;

	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			    GFP_KERNEL);
	if (smb2_data == NULL)
		return -ENOMEM;

	rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
			     smb2_data);
	if (!rc)
		move_smb2_info_to_cifs(data, smb2_data);
	kfree(smb2_data);
	return rc;
}

#ifdef CONFIG_CIFS_XATTR
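/*
 * Convert an SMB2 FILE_FULL_EA_INFORMATION list into the flat "user.<name>"
 * format expected by listxattr/getxattr.  If @ea_name is given, only that
 * attribute's value is copied (or just its length, when @dst_size is 0);
 * otherwise all names are emitted.  Returns bytes used/needed or a negative
 * error such as -ERANGE or -ENODATA.
 */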
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}

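/*
 * getxattr/listxattr helper: issue a compound open/query-info/close for
 * FILE_FULL_EA_INFORMATION on @path and convert the reply with
 * move_smb2_ea_to_cifs().  An empty EA list is not an error for the
 * listxattr (ea_name == NULL) case.
 */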
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
			le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}


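/*
 * setxattr helper: send a compound open/set-info/close that writes a single
 * FILE_FULL_EA_INFORMATION entry for @ea_name.  When deleting (@ea_value is
 * NULL) the EA is queried first so that removing a non-existent attribute
 * fails with -ENODATA rather than silently succeeding.
 */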
static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *path, const char *ea_name, const void *ea_value,
	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
	    struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	__le16 *utf16_path = NULL;
	int ea_name_len = strlen(ea_name);
	int flags = 0;
	int len;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	unsigned int size[1];
	void *data[1];
	struct smb2_file_full_ea_info *ea = NULL;
	struct kvec close_iov[1];
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (ea_name_len > 255)
		return -EINVAL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (ses->server->ops->query_all_EAs) {
		if (!ea_value) {
			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
							     ea_name, NULL, 0,
							     cifs_sb);
			if (rc == -ENODATA)
				goto sea_exit;
		}
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_WRITE_EA;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* Set Info */
	memset(&si_iov, 0, sizeof(si_iov));
	rqst[1].rq_iov = si_iov;
	rqst[1].rq_nvec = 1;

	len = sizeof(ea) + ea_name_len + ea_value_len + 1;
	ea = kzalloc(len, GFP_KERNEL);
	if (ea == NULL) {
		rc = -ENOMEM;
		goto sea_exit;
	}

	ea->ea_name_length = ea_name_len;
	ea->ea_value_length = cpu_to_le16(ea_value_len);
	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

	size[0] = len;
	data[0] = ea;

	rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
				COMPOUND_FID, current->tgid,
				FILE_FULL_EA_INFORMATION,
				SMB2_O_INFO_FILE, 0, data, size);
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;
	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	/* no need to bump num_remote_opens because handle immediately closed */

 sea_exit:
	kfree(ea);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_set_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
#endif

static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}

static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
	int i;

	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
	}
}

static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}

static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;

	/*
	 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
	 * totals (requests sent) since those SMBs are per-session not per tcon
	 */
	seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
		   (long long)(tcon->bytes_read),
		   (long long)(tcon->bytes_written));
	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
		   atomic_read(&tcon->num_local_opens),
		   atomic_read(&tcon->num_remote_opens));
	seq_printf(m, "\nTreeConnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
	seq_printf(m, "\nCreates: %d total %d failed",
		   atomic_read(&sent[SMB2_CREATE_HE]),
		   atomic_read(&failed[SMB2_CREATE_HE]));
	seq_printf(m, "\nCloses: %d total %d failed",
		   atomic_read(&sent[SMB2_CLOSE_HE]),
		   atomic_read(&failed[SMB2_CLOSE_HE]));
	seq_printf(m, "\nFlushes: %d total %d failed",
		   atomic_read(&sent[SMB2_FLUSH_HE]),
		   atomic_read(&failed[SMB2_FLUSH_HE]));
	seq_printf(m, "\nReads: %d total %d failed",
		   atomic_read(&sent[SMB2_READ_HE]),
		   atomic_read(&failed[SMB2_READ_HE]));
	seq_printf(m, "\nWrites: %d total %d failed",
		   atomic_read(&sent[SMB2_WRITE_HE]),
		   atomic_read(&failed[SMB2_WRITE_HE]));
	seq_printf(m, "\nLocks: %d total %d failed",
		   atomic_read(&sent[SMB2_LOCK_HE]),
		   atomic_read(&failed[SMB2_LOCK_HE]));
	seq_printf(m, "\nIOCTLs: %d total %d failed",
		   atomic_read(&sent[SMB2_IOCTL_HE]),
		   atomic_read(&failed[SMB2_IOCTL_HE]));
	seq_printf(m, "\nQueryDirectories: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
	seq_printf(m, "\nChangeNotifies: %d total %d failed",
		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
	seq_printf(m, "\nQueryInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
	seq_printf(m, "\nSetInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_SET_INFO_HE]),
		   atomic_read(&failed[SMB2_SET_INFO_HE]));
	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
}

static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}

static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}

static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}

static int
smb2_ioctl_query_info(const unsigned int xid,
		      struct cifs_tcon *tcon,
		      __le16 *path, int is_dir,
		      unsigned long p)
{
	struct cifs_ses *ses = tcon->ses;
	char __user *arg = (char __user *)p;
	struct smb_query_info qi;
	struct smb_query_info __user *pqi;
	int rc = 0;
	int flags = 0;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	struct smb2_ioctl_rsp *io_rsp = NULL;
	void *buffer = NULL;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec qi_iov[1];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	struct kvec close_iov[1];
	unsigned int size[2];
	void *data[2];

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
		return -EFAULT;

	if (qi.output_buffer_length > 1024)
		return -EINVAL;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	buffer = memdup_user(arg + sizeof(struct smb_query_info),
			     qi.output_buffer_length);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.disposition = FILE_OPEN;
	if (is_dir)
		oparms.create_options = CREATE_NOT_FILE;
	else
		oparms.create_options = CREATE_NOT_DIR;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (qi.flags & PASSTHRU_FSCTL) {
		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
			oparms.desired_access = GENERIC_ALL;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
			oparms.desired_access = GENERIC_READ;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
			oparms.desired_access = GENERIC_WRITE;
			break;
		}
	} else if (qi.flags & PASSTHRU_SET_INFO) {
		oparms.desired_access = GENERIC_WRITE;
	} else {
		oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
	}

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
	if (rc)
		goto iqinf_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query */
	if (qi.flags & PASSTHRU_FSCTL) {
		/* Can eventually relax perm check since server enforces too */
1454 if (!capable(CAP_SYS_ADMIN))
1455 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001456 else {
1457 memset(&io_iov, 0, sizeof(io_iov));
1458 rqst[1].rq_iov = io_iov;
1459 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1460
1461 rc = SMB2_ioctl_init(tcon, &rqst[1],
1462 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001463 qi.info_type, true, buffer,
1464 qi.output_buffer_length,
1465 CIFSMaxBufSize);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001466 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001467 } else if (qi.flags == PASSTHRU_SET_INFO) {
1468 /* Can eventually relax perm check since server enforces too */
1469 if (!capable(CAP_SYS_ADMIN))
1470 rc = -EPERM;
1471 else {
1472 memset(&si_iov, 0, sizeof(si_iov));
1473 rqst[1].rq_iov = si_iov;
1474 rqst[1].rq_nvec = 1;
1475
1476 size[0] = 8;
1477 data[0] = buffer;
1478
1479 rc = SMB2_set_info_init(tcon, &rqst[1],
1480 COMPOUND_FID, COMPOUND_FID,
1481 current->tgid,
1482 FILE_END_OF_FILE_INFORMATION,
1483 SMB2_O_INFO_FILE, 0, data, size);
1484 }
Steve French31ba4332019-03-13 02:40:07 -05001485 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1486 memset(&qi_iov, 0, sizeof(qi_iov));
1487 rqst[1].rq_iov = qi_iov;
1488 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001489
Steve French31ba4332019-03-13 02:40:07 -05001490 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
1491 COMPOUND_FID, qi.file_info_class,
1492 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001493 qi.input_buffer_length,
1494 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001495 } else { /* unknown flags */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001496 cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001497 rc = -EINVAL;
1498 }
1499
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001500 if (rc)
1501 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001502 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001503 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001504
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001505 /* Close */
1506 memset(&close_iov, 0, sizeof(close_iov));
1507 rqst[2].rq_iov = close_iov;
1508 rqst[2].rq_nvec = 1;
1509
1510 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001511 if (rc)
1512 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001513 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001514
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001515 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1516 resp_buftype, rsp_iov);
1517 if (rc)
1518 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001519
1520 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001521 if (qi.flags & PASSTHRU_FSCTL) {
1522 pqi = (struct smb_query_info __user *)arg;
1523 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1524 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1525 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001526 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001527 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1528 > rsp_iov[1].iov_len)
1529 goto e_fault;
1530
1531 if (copy_to_user(&pqi->input_buffer_length,
1532 &qi.input_buffer_length,
1533 sizeof(qi.input_buffer_length)))
1534 goto e_fault;
1535
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001536 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1537 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001538 qi.input_buffer_length))
1539 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001540 } else {
1541 pqi = (struct smb_query_info __user *)arg;
1542 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1543 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1544 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001545 if (copy_to_user(&pqi->input_buffer_length,
1546 &qi.input_buffer_length,
1547 sizeof(qi.input_buffer_length)))
1548 goto e_fault;
1549
1550 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1551 qi.input_buffer_length))
1552 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001553 }
1554
1555 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001556 kfree(buffer);
1557 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001558 if (qi.flags & PASSTHRU_FSCTL)
1559 SMB2_ioctl_free(&rqst[1]);
1560 else
1561 SMB2_query_info_free(&rqst[1]);
1562
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001563 SMB2_close_free(&rqst[2]);
1564 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1565 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1566 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001567 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001568
1569e_fault:
1570 rc = -EFAULT;
1571 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001572}
1573
Sachin Prabhu620d8742017-02-10 16:03:51 +05301574static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001575smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001576 struct cifsFileInfo *srcfile,
1577 struct cifsFileInfo *trgtfile, u64 src_off,
1578 u64 len, u64 dest_off)
1579{
1580 int rc;
1581 unsigned int ret_data_len;
1582 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001583 struct copychunk_ioctl_rsp *retbuf = NULL;
1584 struct cifs_tcon *tcon;
1585 int chunks_copied = 0;
1586 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301587 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001588
1589 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1590
1591 if (pcchunk == NULL)
1592 return -ENOMEM;
1593
Christoph Probsta205d502019-05-08 21:36:25 +02001594 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001595 /* Request a key from the server to identify the source of the copy */
1596 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1597 srcfile->fid.persistent_fid,
1598 srcfile->fid.volatile_fid, pcchunk);
1599
 1600	/* Note: request_res_key sets res_key null only if rc != 0 */
1601 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001602 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001603
 1604	/* For now the array is only one chunk long; will make it more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001605 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001606 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001607 pcchunk->Reserved2 = 0;
1608
Steve French9bf0c9c2013-11-16 18:05:28 -06001609 tcon = tlink_tcon(trgtfile->tlink);
1610
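	/*
	 * Illustrative arithmetic (typical, not guaranteed, numbers): with a
	 * max_bytes_chunk of 1 MiB, a 2.5 MiB copy is issued as three
	 * FSCTL_SRV_COPYCHUNK_WRITE ioctls of 1 MiB, 1 MiB and 0.5 MiB, the
	 * source/target offsets advancing by TotalBytesWritten each pass.
	 */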
1611 while (len > 0) {
1612 pcchunk->SourceOffset = cpu_to_le64(src_off);
1613 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1614 pcchunk->Length =
1615 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1616
1617 /* Request server copy to target from src identified by key */
1618 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001619 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001620 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001621 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1622 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001623 if (rc == 0) {
1624 if (ret_data_len !=
1625 sizeof(struct copychunk_ioctl_rsp)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001626 cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001627 rc = -EIO;
1628 goto cchunk_out;
1629 }
1630 if (retbuf->TotalBytesWritten == 0) {
1631 cifs_dbg(FYI, "no bytes copied\n");
1632 rc = -EIO;
1633 goto cchunk_out;
1634 }
1635 /*
1636 * Check if server claimed to write more than we asked
1637 */
1638 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1639 le32_to_cpu(pcchunk->Length)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001640 cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001641 rc = -EIO;
1642 goto cchunk_out;
1643 }
1644 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001645 cifs_tcon_dbg(VFS, "invalid num chunks written\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001646 rc = -EIO;
1647 goto cchunk_out;
1648 }
1649 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001650
Sachin Prabhu620d8742017-02-10 16:03:51 +05301651 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1652 src_off += bytes_written;
1653 dest_off += bytes_written;
1654 len -= bytes_written;
1655 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001656
Sachin Prabhu620d8742017-02-10 16:03:51 +05301657 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001658 le32_to_cpu(retbuf->ChunksWritten),
1659 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301660 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001661 } else if (rc == -EINVAL) {
1662 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1663 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001664
Steve French9bf0c9c2013-11-16 18:05:28 -06001665 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1666 le32_to_cpu(retbuf->ChunksWritten),
1667 le32_to_cpu(retbuf->ChunkBytesWritten),
1668 le32_to_cpu(retbuf->TotalBytesWritten));
1669
1670 /*
 1671			 * Check whether this is the first request using these sizes,
 1672			 * i.e. whether the copy already succeeded once with the original
 1673			 * sizes, or whether the server gave us different sizes after we
 1674			 * already updated the max sizes on a previous request. In either
 1675			 * case the server should not be returning an error now.
1676 */
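			/*
			 * (Typical example, not guaranteed by the protocol: a
			 * Windows server advertises a 1 MiB maximum chunk in
			 * ChunkBytesWritten, so an oversized first request fails
			 * with -EINVAL and is retried with the smaller
			 * max_bytes_chunk adopted below.)
			 */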
1677 if ((chunks_copied != 0) || chunk_sizes_updated)
1678 goto cchunk_out;
1679
1680 /* Check that server is not asking us to grow size */
1681 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1682 tcon->max_bytes_chunk)
1683 tcon->max_bytes_chunk =
1684 le32_to_cpu(retbuf->ChunkBytesWritten);
1685 else
1686 goto cchunk_out; /* server gave us bogus size */
1687
1688 /* No need to change MaxChunks since already set to 1 */
1689 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001690 } else
1691 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001692 }
1693
1694cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001695 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001696 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301697 if (rc)
1698 return rc;
1699 else
1700 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001701}
1702
1703static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001704smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1705 struct cifs_fid *fid)
1706{
1707 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1708}
1709
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001710static unsigned int
1711smb2_read_data_offset(char *buf)
1712{
1713 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001714
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001715 return rsp->DataOffset;
1716}
1717
1718static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001719smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001720{
1721 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001722
1723 if (in_remaining)
1724 return le32_to_cpu(rsp->DataRemaining);
1725
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001726 return le32_to_cpu(rsp->DataLength);
1727}
1728
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001729
1730static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001731smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001732 struct cifs_io_parms *parms, unsigned int *bytes_read,
1733 char **buf, int *buf_type)
1734{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001735 parms->persistent_fid = pfid->persistent_fid;
1736 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001737 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1738}
1739
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001740static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001741smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001742 struct cifs_io_parms *parms, unsigned int *written,
1743 struct kvec *iov, unsigned long nr_segs)
1744{
1745
Steve Frenchdb8b6312014-09-22 05:13:55 -05001746 parms->persistent_fid = pfid->persistent_fid;
1747 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001748 return SMB2_write(xid, parms, written, iov, nr_segs);
1749}
1750
Steve Frenchd43cc792014-08-13 17:16:29 -05001751/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1752static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1753 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1754{
1755 struct cifsInodeInfo *cifsi;
1756 int rc;
1757
1758 cifsi = CIFS_I(inode);
1759
1760 /* if file already sparse don't bother setting sparse again */
1761 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1762 return true; /* already sparse */
1763
1764 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1765 return true; /* already not sparse */
1766
1767 /*
1768 * Can't check for sparse support on share the usual way via the
1769 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1770 * since Samba server doesn't set the flag on the share, yet
1771 * supports the set sparse FSCTL and returns sparse correctly
1772 * in the file attributes. If we fail setting sparse though we
1773 * mark that server does not support sparse files for this share
1774 * to avoid repeatedly sending the unsupported fsctl to server
1775 * if the file is repeatedly extended.
1776 */
1777 if (tcon->broken_sparse_sup)
1778 return false;
1779
1780 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1781 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001782 true /* is_fctl */,
Steve French153322f2019-03-28 22:32:49 -05001783 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001784 if (rc) {
1785 tcon->broken_sparse_sup = true;
1786 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1787 return false;
1788 }
1789
1790 if (setsparse)
1791 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1792 else
1793 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1794
1795 return true;
1796}
1797
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001798static int
1799smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1800 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1801{
1802 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001803 struct inode *inode;
1804
1805 /*
 1806	 * If extending the file more than 8K beyond EOF, make it sparse. Many
 1807	 * Linux filesystems make files sparse by default when extending via ftruncate.
1808 */
David Howells2b0143b2015-03-17 22:25:59 +00001809 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001810
1811 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001812 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001813
Steve Frenchd43cc792014-08-13 17:16:29 -05001814 /* whether set sparse succeeds or not, extend the file */
1815 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001816 }
1817
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001818 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001819 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001820}
1821
Steve French02b16662015-06-27 21:18:36 -07001822static int
1823smb2_duplicate_extents(const unsigned int xid,
1824 struct cifsFileInfo *srcfile,
1825 struct cifsFileInfo *trgtfile, u64 src_off,
1826 u64 len, u64 dest_off)
1827{
1828 int rc;
1829 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001830 struct duplicate_extents_to_file dup_ext_buf;
1831 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1832
 1833	/* the server must advertise duplicate extent support with this flag */
1834 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1835 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1836 return -EOPNOTSUPP;
1837
1838 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1839 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1840 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1841 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1842 dup_ext_buf.ByteCount = cpu_to_le64(len);
Christoph Probsta205d502019-05-08 21:36:25 +02001843 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
Steve French02b16662015-06-27 21:18:36 -07001844 src_off, dest_off, len);
1845
1846 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1847 if (rc)
1848 goto duplicate_extents_out;
1849
1850 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1851 trgtfile->fid.volatile_fid,
1852 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001853 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001854 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001855 sizeof(struct duplicate_extents_to_file),
Steve French153322f2019-03-28 22:32:49 -05001856 CIFSMaxBufSize, NULL,
Steve French02b16662015-06-27 21:18:36 -07001857 &ret_data_len);
1858
1859 if (ret_data_len > 0)
Christoph Probsta205d502019-05-08 21:36:25 +02001860 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
Steve French02b16662015-06-27 21:18:36 -07001861
1862duplicate_extents_out:
1863 return rc;
1864}
Steve French02b16662015-06-27 21:18:36 -07001865
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001866static int
Steve French64a5cfa2013-10-14 15:31:32 -05001867smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1868 struct cifsFileInfo *cfile)
1869{
1870 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1871 cfile->fid.volatile_fid);
1872}
1873
1874static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001875smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1876 struct cifsFileInfo *cfile)
1877{
1878 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001879 unsigned int ret_data_len;
1880
1881 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1882 integr_info.Flags = 0;
1883 integr_info.Reserved = 0;
1884
1885 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1886 cfile->fid.volatile_fid,
1887 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001888 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001889 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001890 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001891 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001892 &ret_data_len);
1893
1894}
1895
Steve Frenche02789a2018-08-09 14:33:12 -05001896/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1897#define GMT_TOKEN_SIZE 50
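/*
 * For illustration, one token in the returned array looks like
 * "@GMT-2021.03.27-20.52.19" (an example timestamp): 24 UTF-16 code points
 * (48 bytes) plus the 2-byte null terminator, i.e. GMT_TOKEN_SIZE in total.
 */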
1898
Steve French153322f2019-03-28 22:32:49 -05001899#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1900
Steve Frenche02789a2018-08-09 14:33:12 -05001901/*
1902 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1903 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1904 */
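/*
 * Sketch of the expected user space flow (the CIFS_ENUM_SNAPSHOTS ioctl name
 * is assumed from cifs_ioctl.h; shown for illustration only):
 *
 *	struct smb_snapshot_array in = { 0 };	// sizes zeroed on first pass
 *	ioctl(fd, CIFS_ENUM_SNAPSHOTS, &in);	// learn required array size
 *	// then allocate snapshot_array_size bytes, set that size in the
 *	// header and call the ioctl again to receive the @GMT-... tokens
 */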
Steve Frenchb3152e22015-06-24 03:17:02 -05001905static int
Steve French834170c2016-09-30 21:14:26 -05001906smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1907 struct cifsFileInfo *cfile, void __user *ioc_buf)
1908{
1909 char *retbuf = NULL;
1910 unsigned int ret_data_len = 0;
1911 int rc;
Steve French153322f2019-03-28 22:32:49 -05001912 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05001913 struct smb_snapshot_array snapshot_in;
1914
Steve French973189a2019-04-04 00:41:04 -05001915 /*
 1916	 * On the first query to enumerate the list of snapshots available
 1917	 * for this volume the buffer begins with 0 (the number of snapshots
 1918	 * which can be returned is zero, since at that point we do not know
 1919	 * how big the buffer needs to be). On the second query,
 1920	 * ret_data_len is set to the number of snapshots, which tells us to
 1921	 * set a larger maximum response size (see below).
1922 */
Steve French153322f2019-03-28 22:32:49 -05001923 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
1924 return -EFAULT;
1925
1926 /*
 1927	 * Note that for snapshot queries, servers like Azure expect the first
 1928	 * query to be of minimal size (and just used to get the number/size
 1929	 * of previous versions), so the response size must be specified as
 1930	 * EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
 1931	 * to a multiple of eight bytes.
1932 */
1933 if (ret_data_len == 0)
1934 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
1935 else
1936 max_response_size = CIFSMaxBufSize;
1937
Steve French834170c2016-09-30 21:14:26 -05001938 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1939 cfile->fid.volatile_fid,
1940 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001941 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001942 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05001943 (char **)&retbuf,
1944 &ret_data_len);
 1945	cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
1946 rc, ret_data_len);
1947 if (rc)
1948 return rc;
1949
1950 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
1951 /* Fixup buffer */
1952 if (copy_from_user(&snapshot_in, ioc_buf,
1953 sizeof(struct smb_snapshot_array))) {
1954 rc = -EFAULT;
1955 kfree(retbuf);
1956 return rc;
1957 }
Steve French834170c2016-09-30 21:14:26 -05001958
Steve Frenche02789a2018-08-09 14:33:12 -05001959 /*
1960 * Check for min size, ie not large enough to fit even one GMT
1961 * token (snapshot). On the first ioctl some users may pass in
1962 * smaller size (or zero) to simply get the size of the array
1963 * so the user space caller can allocate sufficient memory
1964 * and retry the ioctl again with larger array size sufficient
1965 * to hold all of the snapshot GMT tokens on the second try.
1966 */
1967 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
1968 ret_data_len = sizeof(struct smb_snapshot_array);
1969
1970 /*
1971 * We return struct SRV_SNAPSHOT_ARRAY, followed by
1972 * the snapshot array (of 50 byte GMT tokens) each
1973 * representing an available previous version of the data
1974 */
1975 if (ret_data_len > (snapshot_in.snapshot_array_size +
1976 sizeof(struct smb_snapshot_array)))
1977 ret_data_len = snapshot_in.snapshot_array_size +
1978 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05001979
1980 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
1981 rc = -EFAULT;
1982 }
1983
1984 kfree(retbuf);
1985 return rc;
1986}
1987
1988static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001989smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1990 const char *path, struct cifs_sb_info *cifs_sb,
1991 struct cifs_fid *fid, __u16 search_flags,
1992 struct cifs_search_info *srch_inf)
1993{
1994 __le16 *utf16_path;
1995 int rc;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001996 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001997 struct cifs_open_parms oparms;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001998
1999 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2000 if (!utf16_path)
2001 return -ENOMEM;
2002
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002003 oparms.tcon = tcon;
2004 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2005 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05002006 if (backup_cred(cifs_sb))
2007 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2008 else
2009 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002010 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002011 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002012
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002013 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002014 kfree(utf16_path);
2015 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07002016 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002017 return rc;
2018 }
2019
2020 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02002021 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002022
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002023 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
2024 fid->volatile_fid, 0, srch_inf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002025 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07002026 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002027 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002028 }
2029 return rc;
2030}
2031
2032static int
2033smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2034 struct cifs_fid *fid, __u16 search_flags,
2035 struct cifs_search_info *srch_inf)
2036{
2037 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2038 fid->volatile_fid, 0, srch_inf);
2039}
2040
2041static int
2042smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2043 struct cifs_fid *fid)
2044{
2045 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2046}
2047
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002048/*
Christoph Probsta205d502019-05-08 21:36:25 +02002049 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2050 * the number of credits and return true. Otherwise - return false.
2051 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002052static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002053smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002054{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002055 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002056
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002057 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002058 return false;
2059
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002060 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002061 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002062 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002063 spin_unlock(&server->req_lock);
2064 wake_up(&server->request_q);
2065 }
2066
2067 return true;
2068}
2069
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002070static bool
2071smb2_is_session_expired(char *buf)
2072{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002073 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002074
Mark Symsd81243c2018-05-24 09:47:31 +01002075 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2076 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002077 return false;
2078
Steve Frenche68a9322018-07-30 14:23:58 -05002079 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2080 le16_to_cpu(shdr->Command),
2081 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002082 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002083
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002084 return true;
2085}
2086
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002087static int
2088smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2089 struct cifsInodeInfo *cinode)
2090{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002091 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2092 return SMB2_lease_break(0, tcon, cinode->lease_key,
2093 smb2_get_lease_state(cinode));
2094
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002095 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2096 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002097 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002098}
2099
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002100void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002101smb2_set_related(struct smb_rqst *rqst)
2102{
2103 struct smb2_sync_hdr *shdr;
2104
2105 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002106 if (shdr == NULL) {
2107 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2108 return;
2109 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002110 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2111}
2112
2113char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2114
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002115void
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002116smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002117{
2118 struct smb2_sync_hdr *shdr;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002119 struct cifs_ses *ses = tcon->ses;
2120 struct TCP_Server_Info *server = ses->server;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002121 unsigned long len = smb_rqst_len(server, rqst);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002122 int i, num_padding;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002123
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002124 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
2125 if (shdr == NULL) {
2126 cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
2127 return;
2128 }
2129
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002130 /* SMB headers in a compound are 8 byte aligned. */
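	/*
	 * Worked example: a request of len 73 has (len & 7) == 1, so
	 * num_padding = 8 - 1 = 7 and NextCommand becomes 80, the
	 * 8-byte-aligned offset at which the next header starts.
	 */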
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002131
2132 /* No padding needed */
2133 if (!(len & 7))
2134 goto finished;
2135
2136 num_padding = 8 - (len & 7);
2137 if (!smb3_encryption_required(tcon)) {
2138 /*
2139 * If we do not have encryption then we can just add an extra
2140 * iov for the padding.
2141 */
2142 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2143 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2144 rqst->rq_nvec++;
2145 len += num_padding;
2146 } else {
2147 /*
2148 * We can not add a small padding iov for the encryption case
2149 * because the encryption framework can not handle the padding
2150 * iovs.
2151 * We have to flatten this into a single buffer and add
2152 * the padding to it.
2153 */
2154 for (i = 1; i < rqst->rq_nvec; i++) {
2155 memcpy(rqst->rq_iov[0].iov_base +
2156 rqst->rq_iov[0].iov_len,
2157 rqst->rq_iov[i].iov_base,
2158 rqst->rq_iov[i].iov_len);
2159 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06002160 }
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002161 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2162 0, num_padding);
2163 rqst->rq_iov[0].iov_len += num_padding;
2164 len += num_padding;
2165 rqst->rq_nvec = 1;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002166 }
2167
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002168 finished:
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002169 shdr->NextCommand = cpu_to_le32(len);
2170}
2171
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002172/*
2173 * Passes the query info response back to the caller on success.
 2174 * The caller needs to free this with free_rsp_buf().
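 * On the wire this is a single compound of three requests:
 * SMB2_CREATE(utf16_path) + SMB2_QUERY_INFO(COMPOUND_FID) + SMB2_CLOSE,
 * i.e. the path is opened, queried and closed in one round trip
 * (smb2_queryfs() below is one caller).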
2175 */
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002176int
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002177smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2178 __le16 *utf16_path, u32 desired_access,
2179 u32 class, u32 type, u32 output_len,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002180 struct kvec *rsp, int *buftype,
2181 struct cifs_sb_info *cifs_sb)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002182{
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002183 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002184 int flags = 0;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002185 struct smb_rqst rqst[3];
2186 int resp_buftype[3];
2187 struct kvec rsp_iov[3];
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002188 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002189 struct kvec qi_iov[1];
2190 struct kvec close_iov[1];
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002191 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002192 struct cifs_open_parms oparms;
2193 struct cifs_fid fid;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002194 int rc;
2195
2196 if (smb3_encryption_required(tcon))
2197 flags |= CIFS_TRANSFORM_REQ;
2198
2199 memset(rqst, 0, sizeof(rqst));
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002200 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002201 memset(rsp_iov, 0, sizeof(rsp_iov));
2202
2203 memset(&open_iov, 0, sizeof(open_iov));
2204 rqst[0].rq_iov = open_iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002205 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002206
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002207 oparms.tcon = tcon;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002208 oparms.desired_access = desired_access;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002209 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002210 if (cifs_sb && backup_cred(cifs_sb))
2211 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2212 else
2213 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002214 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002215 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002216
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002217 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002218 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002219 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002220 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002221
2222 memset(&qi_iov, 0, sizeof(qi_iov));
2223 rqst[1].rq_iov = qi_iov;
2224 rqst[1].rq_nvec = 1;
2225
2226 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002227 class, type, 0,
2228 output_len, 0,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05002229 NULL);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002230 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002231 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002232 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002233 smb2_set_related(&rqst[1]);
2234
2235 memset(&close_iov, 0, sizeof(close_iov));
2236 rqst[2].rq_iov = close_iov;
2237 rqst[2].rq_nvec = 1;
2238
2239 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2240 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002241 goto qic_exit;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002242 smb2_set_related(&rqst[2]);
2243
2244 rc = compound_send_recv(xid, ses, flags, 3, rqst,
2245 resp_buftype, rsp_iov);
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002246 if (rc) {
2247 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Steve French7dcc82c2019-09-11 00:07:36 -05002248 if (rc == -EREMCHG) {
2249 tcon->need_reconnect = true;
2250 printk_once(KERN_WARNING "server share %s deleted\n",
2251 tcon->treeName);
2252 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002253 goto qic_exit;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002254 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002255 *rsp = rsp_iov[1];
2256 *buftype = resp_buftype[1];
2257
2258 qic_exit:
2259 SMB2_open_free(&rqst[0]);
2260 SMB2_query_info_free(&rqst[1]);
2261 SMB2_close_free(&rqst[2]);
2262 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2263 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2264 return rc;
2265}
2266
2267static int
2268smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2269 struct kstatfs *buf)
2270{
2271 struct smb2_query_info_rsp *rsp;
2272 struct smb2_fs_full_size_info *info = NULL;
2273 __le16 utf16_path = 0; /* Null - open root of share */
2274 struct kvec rsp_iov = {NULL, 0};
2275 int buftype = CIFS_NO_BUFFER;
2276 int rc;
2277
2278
2279 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2280 FILE_READ_ATTRIBUTES,
2281 FS_FULL_SIZE_INFORMATION,
2282 SMB2_O_INFO_FILESYSTEM,
2283 sizeof(struct smb2_fs_full_size_info),
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002284 &rsp_iov, &buftype, NULL);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002285 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002286 goto qfs_exit;
2287
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002288 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002289 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002290 info = (struct smb2_fs_full_size_info *)(
2291 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2292 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2293 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002294 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002295 sizeof(struct smb2_fs_full_size_info));
2296 if (!rc)
2297 smb2_copy_fs_info_to_kstatfs(info, buf);
2298
2299qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002300 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002301 return rc;
2302}
2303
Steve French2d304212018-06-24 23:28:12 -05002304static int
2305smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2306 struct kstatfs *buf)
2307{
2308 int rc;
2309 __le16 srch_path = 0; /* Null - open root of share */
2310 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2311 struct cifs_open_parms oparms;
2312 struct cifs_fid fid;
2313
2314 if (!tcon->posix_extensions)
2315 return smb2_queryfs(xid, tcon, buf);
2316
2317 oparms.tcon = tcon;
2318 oparms.desired_access = FILE_READ_ATTRIBUTES;
2319 oparms.disposition = FILE_OPEN;
2320 oparms.create_options = 0;
2321 oparms.fid = &fid;
2322 oparms.reconnect = false;
2323
2324 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2325 if (rc)
2326 return rc;
2327
2328 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2329 fid.volatile_fid, buf);
2330 buf->f_type = SMB2_MAGIC_NUMBER;
2331 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2332 return rc;
2333}
Steve French2d304212018-06-24 23:28:12 -05002334
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002335static bool
2336smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2337{
2338 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2339 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2340}
2341
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002342static int
2343smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2344 __u64 length, __u32 type, int lock, int unlock, bool wait)
2345{
2346 if (unlock && !lock)
2347 type = SMB2_LOCKFLAG_UNLOCK;
2348 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2349 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2350 current->tgid, length, offset, type, wait);
2351}
2352
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002353static void
2354smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2355{
2356 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2357}
2358
2359static void
2360smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2361{
2362 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2363}
2364
2365static void
2366smb2_new_lease_key(struct cifs_fid *fid)
2367{
Steve Frenchfa70b872016-09-22 00:39:34 -05002368 generate_random_uuid(fid->lease_key);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002369}
2370
Aurelien Aptel9d496402017-02-13 16:16:49 +01002371static int
2372smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2373 const char *search_name,
2374 struct dfs_info3_param **target_nodes,
2375 unsigned int *num_of_nodes,
2376 const struct nls_table *nls_codepage, int remap)
2377{
2378 int rc;
2379 __le16 *utf16_path = NULL;
2380 int utf16_path_len = 0;
2381 struct cifs_tcon *tcon;
2382 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2383 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2384 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2385
Christoph Probsta205d502019-05-08 21:36:25 +02002386 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002387
2388 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002389 * Try to use the IPC tcon, otherwise just use any
Aurelien Aptel9d496402017-02-13 16:16:49 +01002390 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002391 tcon = ses->tcon_ipc;
2392 if (tcon == NULL) {
2393 spin_lock(&cifs_tcp_ses_lock);
2394 tcon = list_first_entry_or_null(&ses->tcon_list,
2395 struct cifs_tcon,
2396 tcon_list);
2397 if (tcon)
2398 tcon->tc_count++;
2399 spin_unlock(&cifs_tcp_ses_lock);
2400 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002401
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002402 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002403 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2404 ses);
2405 rc = -ENOTCONN;
2406 goto out;
2407 }
2408
2409 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2410 &utf16_path_len,
2411 nls_codepage, remap);
2412 if (!utf16_path) {
2413 rc = -ENOMEM;
2414 goto out;
2415 }
2416
2417 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2418 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2419 if (!dfs_req) {
2420 rc = -ENOMEM;
2421 goto out;
2422 }
2423
2424 /* Highest DFS referral version understood */
2425 dfs_req->MaxReferralLevel = DFS_VERSION;
2426
2427 /* Path to resolve in an UTF-16 null-terminated string */
2428 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2429
2430 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002431 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2432 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002433 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002434 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002435 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002436 } while (rc == -EAGAIN);
2437
2438 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002439 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002440 cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002441 goto out;
2442 }
2443
2444 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2445 num_of_nodes, target_nodes,
2446 nls_codepage, remap, search_name,
2447 true /* is_unicode */);
2448 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002449 cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002450 goto out;
2451 }
2452
2453 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002454 if (tcon && !tcon->ipc) {
2455 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002456 spin_lock(&cifs_tcp_ses_lock);
2457 tcon->tc_count--;
2458 spin_unlock(&cifs_tcp_ses_lock);
2459 }
2460 kfree(utf16_path);
2461 kfree(dfs_req);
2462 kfree(dfs_rsp);
2463 return rc;
2464}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002465
2466static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002467parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2468 u32 plen, char **target_path,
2469 struct cifs_sb_info *cifs_sb)
2470{
2471 unsigned int len;
2472
2473 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2474 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2475
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002476 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2477 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2478 le64_to_cpu(symlink_buf->InodeType));
2479 return -EOPNOTSUPP;
2480 }
2481
2482 *target_path = cifs_strndup_from_utf16(
2483 symlink_buf->PathBuffer,
2484 len, true, cifs_sb->local_nls);
2485 if (!(*target_path))
2486 return -ENOMEM;
2487
2488 convert_delimiter(*target_path, '/');
2489 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2490
2491 return 0;
2492}
2493
2494static int
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002495parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
2496 u32 plen, char **target_path,
2497 struct cifs_sb_info *cifs_sb)
2498{
2499 unsigned int sub_len;
2500 unsigned int sub_offset;
2501
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002502 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
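	/*
	 * The constant 20 in the bounds checks below is the fixed-size header
	 * preceding PathBuffer in the symlink reparse buffer: ReparseTag (4) +
	 * ReparseDataLength (2) + Reserved (2) + the four 2-byte name
	 * offset/length fields (8) + Flags (4), assuming the MS-FSCC layout.
	 */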
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002503
2504 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
2505 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
2506 if (sub_offset + 20 > plen ||
2507 sub_offset + sub_len + 20 > plen) {
2508 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
2509 return -EIO;
2510 }
2511
2512 *target_path = cifs_strndup_from_utf16(
2513 symlink_buf->PathBuffer + sub_offset,
2514 sub_len, true, cifs_sb->local_nls);
2515 if (!(*target_path))
2516 return -ENOMEM;
2517
2518 convert_delimiter(*target_path, '/');
2519 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2520
2521 return 0;
2522}
2523
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002524static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002525parse_reparse_point(struct reparse_data_buffer *buf,
2526 u32 plen, char **target_path,
2527 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002528{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002529 if (plen < sizeof(struct reparse_data_buffer)) {
 2530		cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
 2531			 plen);
2532 return -EIO;
2533 }
2534
2535 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2536 sizeof(struct reparse_data_buffer)) {
 2537		cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
 2538			 plen);
2539 return -EIO;
2540 }
2541
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002542 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002543 switch (le32_to_cpu(buf->ReparseTag)) {
2544 case IO_REPARSE_TAG_NFS:
2545 return parse_reparse_posix(
2546 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002547 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002548 case IO_REPARSE_TAG_SYMLINK:
2549 return parse_reparse_symlink(
2550 (struct reparse_symlink_data_buffer *)buf,
2551 plen, target_path, cifs_sb);
2552 default:
 2553		cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
 2554			 le32_to_cpu(buf->ReparseTag));
2555 return -EOPNOTSUPP;
2556 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002557}
2558
Pavel Shilovsky78932422016-07-24 10:37:38 +03002559#define SMB2_SYMLINK_STRUCT_SIZE \
2560 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
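/*
 * The "- 1" above drops the one-byte ErrorData[] placeholder at the end of
 * struct smb2_err_rsp, since the symlink error payload begins there (this
 * assumes the smb2pdu.h layout with a 1-byte flexible ErrorData member).
 */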
2561
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002562static int
2563smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002564 struct cifs_sb_info *cifs_sb, const char *full_path,
2565 char **target_path, bool is_reparse_point)
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002566{
2567 int rc;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002568 __le16 *utf16_path = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002569 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2570 struct cifs_open_parms oparms;
2571 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002572 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002573 struct smb2_err_rsp *err_buf = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002574 struct smb2_symlink_err_rsp *symlink;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002575 unsigned int sub_len;
2576 unsigned int sub_offset;
2577 unsigned int print_len;
2578 unsigned int print_offset;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002579 int flags = 0;
2580 struct smb_rqst rqst[3];
2581 int resp_buftype[3];
2582 struct kvec rsp_iov[3];
2583 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2584 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
2585 struct kvec close_iov[1];
2586 struct smb2_create_rsp *create_rsp;
2587 struct smb2_ioctl_rsp *ioctl_rsp;
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002588 struct reparse_data_buffer *reparse_buf;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002589 u32 plen;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002590
2591 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2592
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002593 *target_path = NULL;
2594
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002595 if (smb3_encryption_required(tcon))
2596 flags |= CIFS_TRANSFORM_REQ;
2597
2598 memset(rqst, 0, sizeof(rqst));
2599 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2600 memset(rsp_iov, 0, sizeof(rsp_iov));
2601
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002602 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2603 if (!utf16_path)
2604 return -ENOMEM;
2605
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002606 /* Open */
2607 memset(&open_iov, 0, sizeof(open_iov));
2608 rqst[0].rq_iov = open_iov;
2609 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2610
2611 memset(&oparms, 0, sizeof(oparms));
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002612 oparms.tcon = tcon;
2613 oparms.desired_access = FILE_READ_ATTRIBUTES;
2614 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002615
Steve French5e196972018-08-27 17:04:13 -05002616 if (backup_cred(cifs_sb))
2617 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2618 else
2619 oparms.create_options = 0;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002620 if (is_reparse_point)
2621 oparms.create_options = OPEN_REPARSE_POINT;
2622
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002623 oparms.fid = &fid;
2624 oparms.reconnect = false;
2625
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002626 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
2627 if (rc)
2628 goto querty_exit;
2629 smb2_set_next_command(tcon, &rqst[0]);
2630
2631
2632 /* IOCTL */
2633 memset(&io_iov, 0, sizeof(io_iov));
2634 rqst[1].rq_iov = io_iov;
2635 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
2636
2637 rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
2638 fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
2639 true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
2640 if (rc)
2641 goto querty_exit;
2642
2643 smb2_set_next_command(tcon, &rqst[1]);
2644 smb2_set_related(&rqst[1]);
2645
2646
2647 /* Close */
2648 memset(&close_iov, 0, sizeof(close_iov));
2649 rqst[2].rq_iov = close_iov;
2650 rqst[2].rq_nvec = 1;
2651
2652 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2653 if (rc)
2654 goto querty_exit;
2655
2656 smb2_set_related(&rqst[2]);
2657
2658 rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
2659 resp_buftype, rsp_iov);
2660
2661 create_rsp = rsp_iov[0].iov_base;
2662 if (create_rsp && create_rsp->sync_hdr.Status)
2663 err_iov = rsp_iov[0];
2664 ioctl_rsp = rsp_iov[1].iov_base;
2665
2666 /*
2667 * Open was successful and we got an ioctl response.
2668 */
2669 if ((rc == 0) && (is_reparse_point)) {
2670 /* See MS-FSCC 2.3.23 */
2671
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002672 reparse_buf = (struct reparse_data_buffer *)
2673 ((char *)ioctl_rsp +
2674 le32_to_cpu(ioctl_rsp->OutputOffset));
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002675 plen = le32_to_cpu(ioctl_rsp->OutputCount);
2676
2677 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
2678 rsp_iov[1].iov_len) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002679 cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002680 plen);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002681 rc = -EIO;
2682 goto querty_exit;
2683 }
2684
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002685 rc = parse_reparse_point(reparse_buf, plen, target_path,
2686 cifs_sb);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002687 goto querty_exit;
2688 }
2689
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002690 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002691 rc = -ENOENT;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002692 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002693 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002694
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002695 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002696 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002697 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002698 rc = -EINVAL;
2699 goto querty_exit;
2700 }
2701
2702 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2703 if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
2704 le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
2705 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002706 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002707 }
2708
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002709 /* open must fail on symlink - reset rc */
2710 rc = 0;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002711 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2712 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002713 print_len = le16_to_cpu(symlink->PrintNameLength);
2714 print_offset = le16_to_cpu(symlink->PrintNameOffset);
2715
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002716 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002717 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002718 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002719 }
2720
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002721 if (err_iov.iov_len <
2722 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002723 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002724 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002725 }
2726
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002727 *target_path = cifs_strndup_from_utf16(
2728 (char *)symlink->PathBuffer + sub_offset,
2729 sub_len, true, cifs_sb->local_nls);
2730 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002731 rc = -ENOMEM;
2732 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002733 }
2734 convert_delimiter(*target_path, '/');
2735 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002736
2737 querty_exit:
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002738 cifs_dbg(FYI, "query symlink rc %d\n", rc);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002739 kfree(utf16_path);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002740 SMB2_open_free(&rqst[0]);
2741 SMB2_ioctl_free(&rqst[1]);
2742 SMB2_close_free(&rqst[2]);
2743 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2744 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2745 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002746 return rc;
2747}
2748
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002749static struct cifs_ntsd *
2750get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2751 const struct cifs_fid *cifsfid, u32 *pacllen)
2752{
2753 struct cifs_ntsd *pntsd = NULL;
2754 unsigned int xid;
2755 int rc = -EOPNOTSUPP;
2756 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2757
2758 if (IS_ERR(tlink))
2759 return ERR_CAST(tlink);
2760
2761 xid = get_xid();
2762 cifs_dbg(FYI, "trying to get acl\n");
2763
2764 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2765 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2766 free_xid(xid);
2767
2768 cifs_put_tlink(tlink);
2769
2770 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2771 if (rc)
2772 return ERR_PTR(rc);
2773 return pntsd;
2774
2775}
2776
2777static struct cifs_ntsd *
2778get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2779 const char *path, u32 *pacllen)
2780{
2781 struct cifs_ntsd *pntsd = NULL;
2782 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2783 unsigned int xid;
2784 int rc;
2785 struct cifs_tcon *tcon;
2786 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2787 struct cifs_fid fid;
2788 struct cifs_open_parms oparms;
2789 __le16 *utf16_path;
2790
2791 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2792 if (IS_ERR(tlink))
2793 return ERR_CAST(tlink);
2794
2795 tcon = tlink_tcon(tlink);
2796 xid = get_xid();
2797
2798 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002799 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002800 else
2801 oparms.create_options = 0;
2802
2803 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002804 if (!utf16_path) {
2805 rc = -ENOMEM;
2806 free_xid(xid);
2807 return ERR_PTR(rc);
2808 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002809
2810 oparms.tcon = tcon;
2811 oparms.desired_access = READ_CONTROL;
2812 oparms.disposition = FILE_OPEN;
2813 oparms.fid = &fid;
2814 oparms.reconnect = false;
2815
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002816 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002817 kfree(utf16_path);
2818 if (!rc) {
2819 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2820 fid.volatile_fid, (void **)&pntsd, pacllen);
2821 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2822 }
2823
2824 cifs_put_tlink(tlink);
2825 free_xid(xid);
2826
2827 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2828 if (rc)
2829 return ERR_PTR(rc);
2830 return pntsd;
2831}
2832
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002833static int
2834set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2835 struct inode *inode, const char *path, int aclflag)
2836{
2837 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2838 unsigned int xid;
2839 int rc, access_flags = 0;
2840 struct cifs_tcon *tcon;
2841 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2842 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2843 struct cifs_fid fid;
2844 struct cifs_open_parms oparms;
2845 __le16 *utf16_path;
2846
2847 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2848 if (IS_ERR(tlink))
2849 return PTR_ERR(tlink);
2850
2851 tcon = tlink_tcon(tlink);
2852 xid = get_xid();
2853
2854 if (backup_cred(cifs_sb))
2855 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2856 else
2857 oparms.create_options = 0;
2858
2859 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2860 access_flags = WRITE_OWNER;
2861 else
2862 access_flags = WRITE_DAC;
2863
2864 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002865 if (!utf16_path) {
2866 rc = -ENOMEM;
2867 free_xid(xid);
2868 return rc;
2869 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002870
2871 oparms.tcon = tcon;
2872 oparms.desired_access = access_flags;
2873 oparms.disposition = FILE_OPEN;
2874 oparms.path = path;
2875 oparms.fid = &fid;
2876 oparms.reconnect = false;
2877
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002878 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002879 kfree(utf16_path);
2880 if (!rc) {
2881 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2882 fid.volatile_fid, pnntsd, acllen, aclflag);
2883 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2884 }
2885
2886 cifs_put_tlink(tlink);
2887 free_xid(xid);
2888 return rc;
2889}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002890
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002891/* Retrieve an ACL from the server */
2892static struct cifs_ntsd *
2893get_smb2_acl(struct cifs_sb_info *cifs_sb,
2894 struct inode *inode, const char *path,
2895 u32 *pacllen)
2896{
2897 struct cifs_ntsd *pntsd = NULL;
2898 struct cifsFileInfo *open_file = NULL;
2899
2900 if (inode)
2901 open_file = find_readable_file(CIFS_I(inode), true);
2902 if (!open_file)
2903 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2904
2905 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2906 cifsFileInfo_put(open_file);
2907 return pntsd;
2908}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002909
Steve French30175622014-08-17 18:16:40 -05002910static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2911 loff_t offset, loff_t len, bool keep_size)
2912{
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002913 struct cifs_ses *ses = tcon->ses;
Steve French30175622014-08-17 18:16:40 -05002914 struct inode *inode;
2915 struct cifsInodeInfo *cifsi;
2916 struct cifsFileInfo *cfile = file->private_data;
2917 struct file_zero_data_information fsctl_buf;
2918 long rc;
2919 unsigned int xid;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002920 __le64 eof;
Steve French30175622014-08-17 18:16:40 -05002921
2922 xid = get_xid();
2923
David Howells2b0143b2015-03-17 22:25:59 +00002924 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05002925 cifsi = CIFS_I(inode);
2926
Christoph Probsta205d502019-05-08 21:36:25 +02002927 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
Steve French779ede02019-03-13 01:41:49 -05002928 ses->Suid, offset, len);
2929
2930
Steve French30175622014-08-17 18:16:40 -05002931 /* if file not oplocked can't be sure whether asking to extend size */
2932 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002933 if (keep_size == false) {
2934 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002935 trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
2936 tcon->tid, ses->Suid, offset, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002937 free_xid(xid);
2938 return rc;
2939 }
Steve French30175622014-08-17 18:16:40 -05002940
Steve Frenchd1c35af2019-05-09 00:09:37 -05002941 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French30175622014-08-17 18:16:40 -05002942
2943 fsctl_buf.FileOffset = cpu_to_le64(offset);
2944 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2945
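	/*
	 * FSCTL_SET_ZERO_DATA tells the server to zero the byte range
	 * [FileOffset, BeyondFinalZero) without the zeroes ever crossing
	 * the wire.
	 */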
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002946 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2947 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
2948 (char *)&fsctl_buf,
2949 sizeof(struct file_zero_data_information),
2950 0, NULL, NULL);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002951 if (rc)
2952 goto zero_range_exit;
2953
2954 /*
2955 * do we also need to change the size of the file?
2956 */
2957 if (keep_size == false && i_size_read(inode) < offset + len) {
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002958 eof = cpu_to_le64(offset + len);
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002959 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2960 cfile->fid.volatile_fid, cfile->pid, &eof);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002961 }
2962
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002963 zero_range_exit:
Steve French30175622014-08-17 18:16:40 -05002964 free_xid(xid);
Steve French779ede02019-03-13 01:41:49 -05002965 if (rc)
2966 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
2967 ses->Suid, offset, len, rc);
2968 else
2969 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
2970 ses->Suid, offset, len);
Steve French30175622014-08-17 18:16:40 -05002971 return rc;
2972}
2973
Steve French31742c52014-08-17 08:38:47 -05002974static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2975 loff_t offset, loff_t len)
2976{
2977 struct inode *inode;
Steve French31742c52014-08-17 08:38:47 -05002978 struct cifsFileInfo *cfile = file->private_data;
2979 struct file_zero_data_information fsctl_buf;
2980 long rc;
2981 unsigned int xid;
2982 __u8 set_sparse = 1;
2983
2984 xid = get_xid();
2985
David Howells2b0143b2015-03-17 22:25:59 +00002986 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002987
2988 /* Need to make file sparse, if not already, before freeing range. */
2989 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002990 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2991 rc = -EOPNOTSUPP;
2992 free_xid(xid);
2993 return rc;
2994 }
Steve French31742c52014-08-17 08:38:47 -05002995
Christoph Probsta205d502019-05-08 21:36:25 +02002996 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05002997
2998 fsctl_buf.FileOffset = cpu_to_le64(offset);
2999 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3000
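	/*
	 * With the file marked sparse, FSCTL_SET_ZERO_DATA lets the server
	 * deallocate the zeroed range instead of storing zeroes, which is
	 * what implements the hole punch.
	 */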
3001 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3002 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01003003 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05003004 sizeof(struct file_zero_data_information),
3005 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05003006 free_xid(xid);
3007 return rc;
3008}
3009
Steve French9ccf3212014-10-18 17:01:15 -05003010static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
3011 loff_t off, loff_t len, bool keep_size)
3012{
3013 struct inode *inode;
3014 struct cifsInodeInfo *cifsi;
3015 struct cifsFileInfo *cfile = file->private_data;
3016 long rc = -EOPNOTSUPP;
3017 unsigned int xid;
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10003018 __le64 eof;
Steve French9ccf3212014-10-18 17:01:15 -05003019
3020 xid = get_xid();
3021
David Howells2b0143b2015-03-17 22:25:59 +00003022 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05003023 cifsi = CIFS_I(inode);
3024
Steve French779ede02019-03-13 01:41:49 -05003025 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3026 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003027 /* if file not oplocked can't be sure whether asking to extend size */
3028 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05003029 if (keep_size == false) {
Steve French779ede02019-03-13 01:41:49 -05003030 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3031 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05003032 free_xid(xid);
3033 return rc;
3034 }
Steve French9ccf3212014-10-18 17:01:15 -05003035
3036 /*
3037 * Files are non-sparse by default so falloc may be a no-op
3038 * Must check if file sparse. If not sparse, and not extending
3039 * then no need to do anything since file already allocated
3040 */
3041 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
3042 if (keep_size == true)
Steve Frenchcfe89092018-05-19 02:04:55 -05003043 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05003044 /* check if extending file */
3045 else if (i_size_read(inode) >= off + len)
3046 /* not extending file and already not sparse */
Steve Frenchcfe89092018-05-19 02:04:55 -05003047 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05003048 /* BB: in future add else clause to extend file */
3049 else
Steve Frenchcfe89092018-05-19 02:04:55 -05003050 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05003051 if (rc)
3052 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3053 tcon->tid, tcon->ses->Suid, off, len, rc);
3054 else
3055 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
3056 tcon->tid, tcon->ses->Suid, off, len);
Steve Frenchcfe89092018-05-19 02:04:55 -05003057 free_xid(xid);
3058 return rc;
Steve French9ccf3212014-10-18 17:01:15 -05003059 }
3060
3061 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
3062 /*
3063 * Check if falloc starts within first few pages of file
3064 * and ends within a few pages of the end of file to
3065 * ensure that most of file is being forced to be
3066 * fallocated now. If so then setting whole file sparse
3067 * ie potentially making a few extra pages at the beginning
3068 * or end of the file non-sparse via set_sparse is harmless.
3069 */
Steve Frenchcfe89092018-05-19 02:04:55 -05003070 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3071 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05003072 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3073 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05003074 free_xid(xid);
3075 return rc;
3076 }
Steve French9ccf3212014-10-18 17:01:15 -05003077
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10003078 smb2_set_sparse(xid, tcon, cfile, inode, false);
3079 rc = 0;
3080 } else {
3081 smb2_set_sparse(xid, tcon, cfile, inode, false);
3082 rc = 0;
3083 if (i_size_read(inode) < off + len) {
3084 eof = cpu_to_le64(off + len);
3085 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3086 cfile->fid.volatile_fid, cfile->pid,
3087 &eof);
3088 }
Steve French9ccf3212014-10-18 17:01:15 -05003089 }
Steve French9ccf3212014-10-18 17:01:15 -05003090
Steve French779ede02019-03-13 01:41:49 -05003091 if (rc)
3092 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3093 tcon->ses->Suid, off, len, rc);
3094 else
3095 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3096 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003097
3098 free_xid(xid);
3099 return rc;
3100}
3101
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10003102static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
3103{
3104 struct cifsFileInfo *wrcfile, *cfile = file->private_data;
3105 struct cifsInodeInfo *cifsi;
3106 struct inode *inode;
3107 int rc = 0;
3108 struct file_allocated_range_buffer in_data, *out_data = NULL;
3109 u32 out_data_len;
3110 unsigned int xid;
3111
3112 if (whence != SEEK_HOLE && whence != SEEK_DATA)
3113 return generic_file_llseek(file, offset, whence);
3114
3115 inode = d_inode(cfile->dentry);
3116 cifsi = CIFS_I(inode);
3117
3118 if (offset < 0 || offset >= i_size_read(inode))
3119 return -ENXIO;
3120
3121 xid = get_xid();
3122 /*
3123 * We need to be sure that all dirty pages are written as they
3124 * might fill holes on the server.
3125 * Note that we also MUST flush any written pages since at least
3126 * some servers (Windows2016) will not reflect recent writes in
3127 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
3128 */
3129 wrcfile = find_writable_file(cifsi, false);
3130 if (wrcfile) {
3131 filemap_write_and_wait(inode->i_mapping);
3132 smb2_flush_file(xid, tcon, &wrcfile->fid);
3133 cifsFileInfo_put(wrcfile);
3134 }
3135
3136 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
3137 if (whence == SEEK_HOLE)
3138 offset = i_size_read(inode);
3139 goto lseek_exit;
3140 }
3141
3142 in_data.file_offset = cpu_to_le64(offset);
3143 in_data.length = cpu_to_le64(i_size_read(inode));
3144
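	/*
	 * FSCTL_QUERY_ALLOCATED_RANGES returns the allocated (non-hole) ranges
	 * overlapping the queried region in increasing order; the reply buffer
	 * only has room for one entry, which is all SEEK_HOLE/SEEK_DATA need.
	 */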
3145 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3146 cfile->fid.volatile_fid,
3147 FSCTL_QUERY_ALLOCATED_RANGES, true,
3148 (char *)&in_data, sizeof(in_data),
3149 sizeof(struct file_allocated_range_buffer),
3150 (char **)&out_data, &out_data_len);
3151 if (rc == -E2BIG)
3152 rc = 0;
3153 if (rc)
3154 goto lseek_exit;
3155
3156 if (whence == SEEK_HOLE && out_data_len == 0)
3157 goto lseek_exit;
3158
3159 if (whence == SEEK_DATA && out_data_len == 0) {
3160 rc = -ENXIO;
3161 goto lseek_exit;
3162 }
3163
3164 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3165 rc = -EINVAL;
3166 goto lseek_exit;
3167 }
3168 if (whence == SEEK_DATA) {
3169 offset = le64_to_cpu(out_data->file_offset);
3170 goto lseek_exit;
3171 }
3172 if (offset < le64_to_cpu(out_data->file_offset))
3173 goto lseek_exit;
3174
3175 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
3176
3177 lseek_exit:
3178 free_xid(xid);
3179 kfree(out_data);
3180 if (!rc)
3181 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3182 else
3183 return rc;
3184}
3185
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003186static int smb3_fiemap(struct cifs_tcon *tcon,
3187 struct cifsFileInfo *cfile,
3188 struct fiemap_extent_info *fei, u64 start, u64 len)
3189{
3190 unsigned int xid;
3191 struct file_allocated_range_buffer in_data, *out_data;
3192 u32 out_data_len;
3193 int i, num, rc, flags, last_blob;
3194 u64 next;
3195
3196 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
3197 return -EBADR;
3198
3199 xid = get_xid();
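	/*
	 * Query allocated ranges in chunks of up to 1024 entries. -E2BIG from
	 * the ioctl means the reply could not hold all ranges, so remember
	 * that more remain and loop again starting past the last range
	 * returned.
	 */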
3200 again:
3201 in_data.file_offset = cpu_to_le64(start);
3202 in_data.length = cpu_to_le64(len);
3203
3204 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3205 cfile->fid.volatile_fid,
3206 FSCTL_QUERY_ALLOCATED_RANGES, true,
3207 (char *)&in_data, sizeof(in_data),
3208 1024 * sizeof(struct file_allocated_range_buffer),
3209 (char **)&out_data, &out_data_len);
3210 if (rc == -E2BIG) {
3211 last_blob = 0;
3212 rc = 0;
3213 } else
3214 last_blob = 1;
3215 if (rc)
3216 goto out;
3217
3218 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3219 rc = -EINVAL;
3220 goto out;
3221 }
3222 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3223 rc = -EINVAL;
3224 goto out;
3225 }
3226
3227 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3228 for (i = 0; i < num; i++) {
3229 flags = 0;
3230 if (i == num - 1 && last_blob)
3231 flags |= FIEMAP_EXTENT_LAST;
3232
3233 rc = fiemap_fill_next_extent(fei,
3234 le64_to_cpu(out_data[i].file_offset),
3235 le64_to_cpu(out_data[i].file_offset),
3236 le64_to_cpu(out_data[i].length),
3237 flags);
3238 if (rc < 0)
3239 goto out;
3240 if (rc == 1) {
3241 rc = 0;
3242 goto out;
3243 }
3244 }
3245
3246 if (!last_blob) {
3247 next = le64_to_cpu(out_data[num - 1].file_offset) +
3248 le64_to_cpu(out_data[num - 1].length);
3249 len = len - (next - start);
3250 start = next;
3251 goto again;
3252 }
3253
3254 out:
3255 free_xid(xid);
3256 kfree(out_data);
3257 return rc;
3258}
Steve French9ccf3212014-10-18 17:01:15 -05003259
Steve French31742c52014-08-17 08:38:47 -05003260static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3261 loff_t off, loff_t len)
3262{
3263 /* KEEP_SIZE already checked for by do_fallocate */
3264 if (mode & FALLOC_FL_PUNCH_HOLE)
3265 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003266 else if (mode & FALLOC_FL_ZERO_RANGE) {
3267 if (mode & FALLOC_FL_KEEP_SIZE)
3268 return smb3_zero_range(file, tcon, off, len, true);
3269 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003270 } else if (mode == FALLOC_FL_KEEP_SIZE)
3271 return smb3_simple_falloc(file, tcon, off, len, true);
3272 else if (mode == 0)
3273 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003274
3275 return -EOPNOTSUPP;
3276}
3277
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003278static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003279smb2_downgrade_oplock(struct TCP_Server_Info *server,
3280 struct cifsInodeInfo *cinode, bool set_level2)
3281{
3282 if (set_level2)
3283 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
3284 0, NULL);
3285 else
3286 server->ops->set_oplock_level(cinode, 0, 0, NULL);
3287}
3288
3289static void
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003290smb21_downgrade_oplock(struct TCP_Server_Info *server,
3291 struct cifsInodeInfo *cinode, bool set_level2)
3292{
3293 server->ops->set_oplock_level(cinode,
3294 set_level2 ? SMB2_LEASE_READ_CACHING_HE :
3295 0, 0, NULL);
3296}
3297
3298static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003299smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3300 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003301{
3302 oplock &= 0xFF;
3303 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3304 return;
3305 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003306 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003307 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3308 &cinode->vfs_inode);
3309 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003310 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003311 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3312 &cinode->vfs_inode);
3313 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3314 cinode->oplock = CIFS_CACHE_READ_FLG;
3315 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3316 &cinode->vfs_inode);
3317 } else
3318 cinode->oplock = 0;
3319}
3320
3321static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003322smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3323 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003324{
3325 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003326 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003327
3328 oplock &= 0xFF;
3329 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3330 return;
3331
Pavel Shilovskya016e272019-09-26 12:31:20 -07003332 /* Check if the server granted an oplock rather than a lease */
3333 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3334 return smb2_set_oplock_level(cinode, oplock, epoch,
3335 purge_cache);
3336
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003337 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003338 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003339 strcat(message, "R");
3340 }
3341 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003342 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003343 strcat(message, "H");
3344 }
3345 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003346 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003347 strcat(message, "W");
3348 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003349 if (!new_oplock)
3350 strncpy(message, "None", sizeof(message));
3351
3352 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003353 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3354 &cinode->vfs_inode);
3355}
3356
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003357static void
3358smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3359 unsigned int epoch, bool *purge_cache)
3360{
3361 unsigned int old_oplock = cinode->oplock;
3362
3363 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
3364
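	/*
	 * The lease epoch advances each time the lease state changes. If it
	 * advanced by more than this single transition accounts for, other
	 * changes happened in between and cached pages may be stale, so tell
	 * the caller to purge the cache.
	 */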
3365 if (purge_cache) {
3366 *purge_cache = false;
3367 if (old_oplock == CIFS_CACHE_READ_FLG) {
3368 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
3369 (epoch - cinode->epoch > 0))
3370 *purge_cache = true;
3371 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3372 (epoch - cinode->epoch > 1))
3373 *purge_cache = true;
3374 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3375 (epoch - cinode->epoch > 1))
3376 *purge_cache = true;
3377 else if (cinode->oplock == 0 &&
3378 (epoch - cinode->epoch > 0))
3379 *purge_cache = true;
3380 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
3381 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3382 (epoch - cinode->epoch > 0))
3383 *purge_cache = true;
3384 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3385 (epoch - cinode->epoch > 1))
3386 *purge_cache = true;
3387 }
3388 cinode->epoch = epoch;
3389 }
3390}
3391
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003392static bool
3393smb2_is_read_op(__u32 oplock)
3394{
3395 return oplock == SMB2_OPLOCK_LEVEL_II;
3396}
3397
3398static bool
3399smb21_is_read_op(__u32 oplock)
3400{
3401 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3402 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3403}
3404
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003405static __le32
3406map_oplock_to_lease(u8 oplock)
3407{
3408 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3409 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3410 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3411 return SMB2_LEASE_READ_CACHING;
3412 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3413 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3414 SMB2_LEASE_WRITE_CACHING;
3415 return 0;
3416}
3417
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003418static char *
3419smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3420{
3421 struct create_lease *buf;
3422
3423 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3424 if (!buf)
3425 return NULL;
3426
Stefano Brivio729c0c92018-07-05 15:10:02 +02003427 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003428 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003429
3430 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3431 (struct create_lease, lcontext));
3432 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3433 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3434 (struct create_lease, Name));
3435 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003436 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003437 buf->Name[0] = 'R';
3438 buf->Name[1] = 'q';
3439 buf->Name[2] = 'L';
3440 buf->Name[3] = 's';
3441 return (char *)buf;
3442}
3443
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003444static char *
3445smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3446{
3447 struct create_lease_v2 *buf;
3448
3449 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3450 if (!buf)
3451 return NULL;
3452
Stefano Brivio729c0c92018-07-05 15:10:02 +02003453 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003454 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3455
3456 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3457 (struct create_lease_v2, lcontext));
3458 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3459 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3460 (struct create_lease_v2, Name));
3461 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003462 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003463 buf->Name[0] = 'R';
3464 buf->Name[1] = 'q';
3465 buf->Name[2] = 'L';
3466 buf->Name[3] = 's';
3467 return (char *)buf;
3468}
3469
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003470static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003471smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003472{
3473 struct create_lease *lc = (struct create_lease *)buf;
3474
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003475 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003476 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3477 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3478 return le32_to_cpu(lc->lcontext.LeaseState);
3479}
3480
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003481static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003482smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003483{
3484 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3485
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003486 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003487 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3488 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003489 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003490 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003491 return le32_to_cpu(lc->lcontext.LeaseState);
3492}
3493
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003494static unsigned int
3495smb2_wp_retry_size(struct inode *inode)
3496{
3497 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3498 SMB2_MAX_BUFFER_SIZE);
3499}
3500
Pavel Shilovsky52755802014-08-18 20:49:57 +04003501static bool
3502smb2_dir_needs_close(struct cifsFileInfo *cfile)
3503{
3504 return !cfile->invalidHandle;
3505}
3506
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003507static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003508fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05003509 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003510{
3511 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003512 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003513
3514 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3515 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3516 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
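	/*
	 * The value 0x0001 here is Flags == "encrypted" on SMB 3.1.1 and
	 * EncryptionAlgorithm == AES-128-CCM on SMB 3.0.x.
	 */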
3517 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French2b2f7542019-06-07 15:16:10 -05003518 if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3519 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3520 else
3521 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003522 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003523}
3524
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11003525/* We cannot use the normal sg_set_buf() as we will sometimes pass a
3526 * stack object as buf.
3527 */
3528static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
3529 unsigned int buflen)
3530{
Sebastien Tisserantee9d6612019-08-01 12:06:08 -05003531 void *addr;
3532 /*
3533 * VMAP_STACK (at least) puts stack into the vmalloc address space
3534 */
3535 if (is_vmalloc_addr(buf))
3536 addr = vmalloc_to_page(buf);
3537 else
3538 addr = virt_to_page(buf);
3539 sg_set_page(sg, addr, buflen, offset_in_page(buf));
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11003540}
3541
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003542/* Assumes the first rqst has a transform header as the first iov.
3543 * I.e.
3544 * rqst[0].rq_iov[0] is transform header
3545 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
3546 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003547 */
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003548static struct scatterlist *
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003549init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003550{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003551 unsigned int sg_len;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003552 struct scatterlist *sg;
3553 unsigned int i;
3554 unsigned int j;
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003555 unsigned int idx = 0;
3556 int skip;
3557
3558 sg_len = 1;
3559 for (i = 0; i < num_rqst; i++)
3560 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003561
3562 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
3563 if (!sg)
3564 return NULL;
3565
3566 sg_init_table(sg, sg_len);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003567 for (i = 0; i < num_rqst; i++) {
3568 for (j = 0; j < rqst[i].rq_nvec; j++) {
3569 /*
3570 * The first rqst has a transform header where the
3571 * first 20 bytes are not part of the encrypted blob
3572 */
3573 skip = (i == 0) && (j == 0) ? 20 : 0;
3574 smb2_sg_set_buf(&sg[idx++],
3575 rqst[i].rq_iov[j].iov_base + skip,
3576 rqst[i].rq_iov[j].iov_len - skip);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10003577 }
Steve Frenchd5f07fb2018-06-05 17:46:24 -05003578
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003579 for (j = 0; j < rqst[i].rq_npages; j++) {
3580 unsigned int len, offset;
3581
3582 rqst_page_get_length(&rqst[i], j, &len, &offset);
3583 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
3584 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003585 }
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003586 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003587 return sg;
3588}
3589
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003590static int
3591smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3592{
3593 struct cifs_ses *ses;
3594 u8 *ses_enc_key;
3595
3596 spin_lock(&cifs_tcp_ses_lock);
3597 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3598 if (ses->Suid != ses_id)
3599 continue;
3600 ses_enc_key = enc ? ses->smb3encryptionkey :
3601 ses->smb3decryptionkey;
3602 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3603 spin_unlock(&cifs_tcp_ses_lock);
3604 return 0;
3605 }
3606 spin_unlock(&cifs_tcp_ses_lock);
3607
3608 return 1;
3609}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003610/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003611 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3612 * iov[0] - transform header (associate data),
3613 * iov[1-N] - SMB2 header and pages - data to encrypt.
3614 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003615 * untouched.
3616 */
3617static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003618crypt_message(struct TCP_Server_Info *server, int num_rqst,
3619 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003620{
3621 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003622 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003623 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003624 int rc = 0;
3625 struct scatterlist *sg;
3626 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003627 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003628 struct aead_request *req;
3629 char *iv;
3630 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003631 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003632 struct crypto_aead *tfm;
3633 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3634
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003635 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3636 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003637 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003638 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003639 return 0;
3640 }
3641
3642 rc = smb3_crypto_aead_allocate(server);
3643 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003644 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003645 return rc;
3646 }
3647
3648 tfm = enc ? server->secmech.ccmaesencrypt :
3649 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003650 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003651 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003652 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003653 return rc;
3654 }
3655
3656 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3657 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003658 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003659 return rc;
3660 }
3661
3662 req = aead_request_alloc(tfm, GFP_KERNEL);
3663 if (!req) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003664 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003665 return -ENOMEM;
3666 }
3667
3668 if (!enc) {
3669 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3670 crypt_len += SMB2_SIGNATURE_SIZE;
3671 }
3672
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003673 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003674 if (!sg) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003675 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003676 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003677 goto free_req;
3678 }
3679
3680 iv_len = crypto_aead_ivsize(tfm);
3681 iv = kzalloc(iv_len, GFP_KERNEL);
3682 if (!iv) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003683 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003684 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003685 goto free_sg;
3686 }
Steve French2b2f7542019-06-07 15:16:10 -05003687
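	/*
	 * GCM uses the 12-byte nonce from the transform header directly as
	 * the IV. For CCM the kernel expects the first IV byte to encode the
	 * size of the counter length field (15 - 11-byte nonce - 1 = 3),
	 * followed by the nonce itself.
	 */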
3688 if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3689 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3690 else {
3691 iv[0] = 3;
3692 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
3693 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003694
3695 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3696 aead_request_set_ad(req, assoc_data_len);
3697
3698 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003699 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003700
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003701 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3702 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003703
3704 if (!rc && enc)
3705 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3706
3707 kfree(iv);
3708free_sg:
3709 kfree(sg);
3710free_req:
3711 kfree(req);
3712 return rc;
3713}
3714
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003715void
3716smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003717{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003718 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003719
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003720 for (i = 0; i < num_rqst; i++) {
3721 if (rqst[i].rq_pages) {
3722 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3723 put_page(rqst[i].rq_pages[j]);
3724 kfree(rqst[i].rq_pages);
3725 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003726 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003727}
3728
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003729/*
3730 * This function will initialize new_rq and encrypt the content.
3731 * The first entry, new_rq[0], only contains a single iov which contains
3732 * a smb2_transform_hdr and is pre-allocated by the caller.
3733 * This function then populates new_rq[1+] with the content from olq_rq[0+].
3734 *
3735 * The end result is an array of smb_rqst structures where the first structure
3736 * only contains a single iov for the transform header which we then can pass
3737 * to crypt_message().
3738 *
3739 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3740 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3741 */
3742static int
3743smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3744 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003745{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003746 struct page **pages;
3747 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3748 unsigned int npages;
3749 unsigned int orig_len = 0;
3750 int i, j;
3751 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003752
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003753 for (i = 1; i < num_rqst; i++) {
3754 npages = old_rq[i - 1].rq_npages;
3755 pages = kmalloc_array(npages, sizeof(struct page *),
3756 GFP_KERNEL);
3757 if (!pages)
3758 goto err_free;
3759
3760 new_rq[i].rq_pages = pages;
3761 new_rq[i].rq_npages = npages;
3762 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3763 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3764 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3765 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3766 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3767
3768 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3769
3770 for (j = 0; j < npages; j++) {
3771 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3772 if (!pages[j])
3773 goto err_free;
3774 }
3775
3776		/* copy the page data from the old request */
3777 for (j = 0; j < npages; j++) {
3778 char *dst, *src;
3779 unsigned int offset, len;
3780
3781 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3782
3783 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3784 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3785
3786 memcpy(dst, src, len);
3787 kunmap(new_rq[i].rq_pages[j]);
3788 kunmap(old_rq[i - 1].rq_pages[j]);
3789 }
3790 }
3791
3792 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05003793 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003794
3795 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02003796 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003797 if (rc)
3798 goto err_free;
3799
3800 return rc;
3801
3802err_free:
3803 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3804 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003805}
3806
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003807static int
3808smb3_is_transform_hdr(void *buf)
3809{
3810 struct smb2_transform_hdr *trhdr = buf;
3811
3812 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3813}
3814
3815static int
3816decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3817 unsigned int buf_data_size, struct page **pages,
3818 unsigned int npages, unsigned int page_data_size)
3819{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003820 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003821 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003822 int rc;
3823
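	/*
	 * Build a single smb_rqst describing the buffer: iov[0] is the
	 * transform header (the associated data), while iov[1] and the pages
	 * hold the ciphertext to be decrypted in place.
	 */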
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003824 iov[0].iov_base = buf;
3825 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3826 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3827 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003828
3829 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003830 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003831 rqst.rq_pages = pages;
3832 rqst.rq_npages = npages;
3833 rqst.rq_pagesz = PAGE_SIZE;
3834 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3835
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003836 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02003837 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003838
3839 if (rc)
3840 return rc;
3841
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003842 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003843
3844 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003845
3846 return rc;
3847}
3848
3849static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003850read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3851 unsigned int npages, unsigned int len)
3852{
3853 int i;
3854 int length;
3855
3856 for (i = 0; i < npages; i++) {
3857 struct page *page = pages[i];
3858 size_t n;
3859
3860 n = len;
3861 if (len >= PAGE_SIZE) {
3862 /* enough data to fill the page */
3863 n = PAGE_SIZE;
3864 len -= n;
3865 } else {
3866 zero_user(page, len, PAGE_SIZE - len);
3867 len = 0;
3868 }
Long Li1dbe3462018-05-30 12:47:55 -07003869 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003870 if (length < 0)
3871 return length;
3872 server->total_read += length;
3873 }
3874
3875 return 0;
3876}
3877
3878static int
3879init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3880 unsigned int cur_off, struct bio_vec **page_vec)
3881{
3882 struct bio_vec *bvec;
3883 int i;
3884
3885 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3886 if (!bvec)
3887 return -ENOMEM;
3888
3889 for (i = 0; i < npages; i++) {
3890 bvec[i].bv_page = pages[i];
3891 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3892 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3893 data_size -= bvec[i].bv_len;
3894 }
3895
3896 if (data_size != 0) {
3897		cifs_dbg(VFS, "%s: data does not fit in the page vector\n", __func__);
3898 kfree(bvec);
3899 return -EIO;
3900 }
3901
3902 *page_vec = bvec;
3903 return 0;
3904}
3905
3906static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003907handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3908 char *buf, unsigned int buf_len, struct page **pages,
3909 unsigned int npages, unsigned int page_data_size)
3910{
3911 unsigned int data_offset;
3912 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003913 unsigned int cur_off;
3914 unsigned int cur_page_idx;
3915 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003916 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10003917 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003918 struct bio_vec *bvec = NULL;
3919 struct iov_iter iter;
3920 struct kvec iov;
3921 int length;
Long Li74dcf412017-11-22 17:38:46 -07003922 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003923
3924 if (shdr->Command != SMB2_READ) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003925 cifs_server_dbg(VFS, "only big read responses are supported\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003926 return -ENOTSUPP;
3927 }
3928
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003929 if (server->ops->is_session_expired &&
3930 server->ops->is_session_expired(buf)) {
3931 cifs_reconnect(server);
3932 wake_up(&server->response_q);
3933 return -1;
3934 }
3935
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003936 if (server->ops->is_status_pending &&
Pavel Shilovsky66265f12019-01-23 17:11:16 -08003937 server->ops->is_status_pending(buf, server))
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003938 return -1;
3939
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003940 /* set up first two iov to get credits */
3941 rdata->iov[0].iov_base = buf;
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003942 rdata->iov[0].iov_len = 0;
3943 rdata->iov[1].iov_base = buf;
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003944 rdata->iov[1].iov_len =
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003945 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003946 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3947 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3948 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3949 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3950
3951 rdata->result = server->ops->map_error(buf, true);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003952 if (rdata->result != 0) {
3953 cifs_dbg(FYI, "%s: server returned error %d\n",
3954 __func__, rdata->result);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003955 /* normal error on read response */
3956 dequeue_mid(mid, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003957 return 0;
3958 }
3959
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003960 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07003961#ifdef CONFIG_CIFS_SMB_DIRECT
3962 use_rdma_mr = rdata->mr;
3963#endif
3964 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003965
3966 if (data_offset < server->vals->read_rsp_size) {
3967 /*
3968 * win2k8 sometimes sends an offset of 0 when the read
3969 * is beyond the EOF. Treat it as if the data starts just after
3970 * the header.
3971 */
3972 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
3973 __func__, data_offset);
3974 data_offset = server->vals->read_rsp_size;
3975 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
3976 /* data_offset is beyond the end of smallbuf */
3977 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
3978 __func__, data_offset);
3979 rdata->result = -EIO;
3980 dequeue_mid(mid, rdata->result);
3981 return 0;
3982 }
3983
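	/*
	 * pad_len is the number of padding bytes the server inserted between
	 * the read response header and the start of the payload.
	 */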
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003984 pad_len = data_offset - server->vals->read_rsp_size;
3985
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003986 if (buf_len <= data_offset) {
3987 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003988 cur_page_idx = pad_len / PAGE_SIZE;
3989 cur_off = pad_len % PAGE_SIZE;
3990
3991 if (cur_page_idx != 0) {
3992 /* data offset is beyond the 1st page of response */
3993 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
3994 __func__, data_offset);
3995 rdata->result = -EIO;
3996 dequeue_mid(mid, rdata->result);
3997 return 0;
3998 }
3999
4000 if (data_len > page_data_size - pad_len) {
4001 /* data_len is corrupt -- discard frame */
4002 rdata->result = -EIO;
4003 dequeue_mid(mid, rdata->result);
4004 return 0;
4005 }
4006
4007 rdata->result = init_read_bvec(pages, npages, page_data_size,
4008 cur_off, &bvec);
4009 if (rdata->result != 0) {
4010 dequeue_mid(mid, rdata->result);
4011 return 0;
4012 }
4013
David Howellsaa563d72018-10-20 00:57:56 +01004014 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004015 } else if (buf_len >= data_offset + data_len) {
4016 /* read response payload is in buf */
4017 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
4018 iov.iov_base = buf + data_offset;
4019 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01004020 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004021 } else {
4022 /* read response payload cannot be in both buf and pages */
4023 WARN_ONCE(1, "buf can not contain only a part of read data");
4024 rdata->result = -EIO;
4025 dequeue_mid(mid, rdata->result);
4026 return 0;
4027 }
4028
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004029 length = rdata->copy_into_pages(server, rdata, &iter);
4030
4031 kfree(bvec);
4032
4033 if (length < 0)
4034 return length;
4035
4036 dequeue_mid(mid, false);
4037 return length;
4038}
4039
Steve French35cf94a2019-09-07 01:09:49 -05004040struct smb2_decrypt_work {
4041 struct work_struct decrypt;
4042 struct TCP_Server_Info *server;
4043 struct page **ppages;
4044 char *buf;
4045 unsigned int npages;
4046 unsigned int len;
4047};
4048
4049
4050static void smb2_decrypt_offload(struct work_struct *work)
4051{
4052 struct smb2_decrypt_work *dw = container_of(work,
4053 struct smb2_decrypt_work, decrypt);
4054 int i, rc;
4055 struct mid_q_entry *mid;
4056
4057 rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
4058 dw->ppages, dw->npages, dw->len);
4059 if (rc) {
4060 cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
4061 goto free_pages;
4062 }
4063
Steve French22553972019-09-13 16:47:31 -05004064 dw->server->lstrp = jiffies;
Steve French35cf94a2019-09-07 01:09:49 -05004065 mid = smb2_find_mid(dw->server, dw->buf);
4066 if (mid == NULL)
4067 cifs_dbg(FYI, "mid not found\n");
4068 else {
4069 mid->decrypted = true;
4070 rc = handle_read_data(dw->server, mid, dw->buf,
4071 dw->server->vals->read_rsp_size,
4072 dw->ppages, dw->npages, dw->len);
Steve French22553972019-09-13 16:47:31 -05004073 mid->callback(mid);
4074 cifs_mid_q_entry_release(mid);
Steve French35cf94a2019-09-07 01:09:49 -05004075 }
4076
Steve French35cf94a2019-09-07 01:09:49 -05004077free_pages:
4078 for (i = dw->npages-1; i >= 0; i--)
4079 put_page(dw->ppages[i]);
4080
4081 kfree(dw->ppages);
4082 cifs_small_buf_release(dw->buf);
Steve Frencha08d8972019-10-26 16:00:44 -05004083 kfree(dw);
Steve French35cf94a2019-09-07 01:09:49 -05004084}
4085
4086
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004087static int
Steve French35cf94a2019-09-07 01:09:49 -05004088receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
4089 int *num_mids)
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004090{
4091 char *buf = server->smallbuf;
4092 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4093 unsigned int npages;
4094 struct page **pages;
4095 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004096 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004097 int rc;
4098 int i = 0;
Steve French35cf94a2019-09-07 01:09:49 -05004099 struct smb2_decrypt_work *dw;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004100
Steve French35cf94a2019-09-07 01:09:49 -05004101 *num_mids = 1;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004102 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004103 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4104
4105 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4106 if (rc < 0)
4107 return rc;
4108 server->total_read += rc;
4109
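	/* amount of read data expected beyond the read response header */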
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004110 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11004111 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004112 npages = DIV_ROUND_UP(len, PAGE_SIZE);
4113
4114 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
4115 if (!pages) {
4116 rc = -ENOMEM;
4117 goto discard_data;
4118 }
4119
4120 for (; i < npages; i++) {
4121 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4122 if (!pages[i]) {
4123 rc = -ENOMEM;
4124 goto discard_data;
4125 }
4126 }
4127
4128	/* read the read response data into pages */
4129 rc = read_data_into_pages(server, pages, npages, len);
4130 if (rc)
4131 goto free_pages;
4132
Pavel Shilovsky350be252017-04-10 10:31:33 -07004133 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004134 if (rc)
4135 goto free_pages;
4136
Steve French35cf94a2019-09-07 01:09:49 -05004137	/*
4138	 * For large reads, offload decryption to a different thread so that
4139	 * more cores can be used for the expensive decryption work.
4140	 */
4141
Steve French10328c42019-09-09 13:30:15 -05004142 if ((server->min_offload) && (server->in_flight > 1) &&
Steve French563317e2019-09-08 23:22:02 -05004143 (server->pdu_size >= server->min_offload)) {
Steve French35cf94a2019-09-07 01:09:49 -05004144 dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
4145 if (dw == NULL)
4146 goto non_offloaded_decrypt;
4147
4148 dw->buf = server->smallbuf;
4149 server->smallbuf = (char *)cifs_small_buf_get();
4150
4151 INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
4152
4153 dw->npages = npages;
4154 dw->server = server;
4155 dw->ppages = pages;
4156 dw->len = len;
Steve Frencha08d8972019-10-26 16:00:44 -05004157 queue_work(decrypt_wq, &dw->decrypt);
Steve French35cf94a2019-09-07 01:09:49 -05004158 *num_mids = 0; /* worker thread takes care of finding mid */
4159 return -1;
4160 }
4161
4162non_offloaded_decrypt:
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004163 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004164 pages, npages, len);
4165 if (rc)
4166 goto free_pages;
4167
4168 *mid = smb2_find_mid(server, buf);
4169 if (*mid == NULL)
4170 cifs_dbg(FYI, "mid not found\n");
4171 else {
4172 cifs_dbg(FYI, "mid found\n");
4173 (*mid)->decrypted = true;
4174 rc = handle_read_data(server, *mid, buf,
4175 server->vals->read_rsp_size,
4176 pages, npages, len);
4177 }
4178
4179free_pages:
4180 for (i = i - 1; i >= 0; i--)
4181 put_page(pages[i]);
4182 kfree(pages);
4183 return rc;
4184discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07004185 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004186 goto free_pages;
4187}
4188
4189static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004190receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004191 struct mid_q_entry **mids, char **bufs,
4192 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004193{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004194 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004195 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004196 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004197 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004198 unsigned int buf_size;
4199 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004200 int next_is_large;
4201 char *next_buffer = NULL;
4202
4203 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004204
4205 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004206 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004207 server->large_buf = true;
4208 memcpy(server->bigbuf, buf, server->total_read);
4209 buf = server->bigbuf;
4210 }
4211
4212 /* now read the rest */
4213 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004214 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004215 if (length < 0)
4216 return length;
4217 server->total_read += length;
4218
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004219 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004220 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
4221 if (length)
4222 return length;
4223
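	/*
	 * Added note: walk the decrypted frame, which may contain several
	 * compounded PDUs. Each SMB2 header's NextCommand gives the offset of
	 * the next one, so the remainder is saved in a fresh buffer before the
	 * current PDU is handed to its mid handler, looping until NextCommand
	 * is zero.
	 */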
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004224 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004225one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004226 shdr = (struct smb2_sync_hdr *)buf;
4227 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004228 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004229 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004230 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004231 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004232 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004233 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004234 pdu_length - le32_to_cpu(shdr->NextCommand));
4235 }
4236
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004237 mid_entry = smb2_find_mid(server, buf);
4238 if (mid_entry == NULL)
4239 cifs_dbg(FYI, "mid not found\n");
4240 else {
4241 cifs_dbg(FYI, "mid found\n");
4242 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004243 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004244 }
4245
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004246 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004247 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004248 return -1;
4249 }
4250 bufs[*num_mids] = buf;
4251 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004252
4253 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004254 ret = mid_entry->handle(server, mid_entry);
4255 else
4256 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004257
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004258 if (ret == 0 && shdr->NextCommand) {
4259 pdu_length -= le32_to_cpu(shdr->NextCommand);
4260 server->large_buf = next_is_large;
4261 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004262 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004263 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004264 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004265 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004266 } else if (ret != 0) {
4267		/*
4268		 * ret != 0 here means that we didn't get to handle_mid(), thus
4269		 * server->smallbuf and server->bigbuf are still valid. We need
4270		 * to free next_buffer because it is not going to be used
4271		 * anywhere.
4272		 */
4273 if (next_is_large)
4274 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4275 else
4276 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004277 }
4278
4279 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004280}
4281
4282static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004283smb3_receive_transform(struct TCP_Server_Info *server,
4284 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004285{
4286 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004287 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004288 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4289 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4290
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004291 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004292 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004293 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004294 pdu_length);
4295 cifs_reconnect(server);
4296 wake_up(&server->response_q);
4297 return -ECONNABORTED;
4298 }
4299
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004300 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004301 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004302 cifs_reconnect(server);
4303 wake_up(&server->response_q);
4304 return -ECONNABORTED;
4305 }
4306
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004307 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004308 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004309 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004310 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004311
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004312 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004313}
4314
4315int
4316smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4317{
4318 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4319
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004320 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004321 NULL, 0, 0);
4322}
4323
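/*
 * Added note: used by the demultiplex code to split compounded replies.
 * Returns the offset from the current header to the next one (i.e. the size
 * of the current frame): for encrypted frames this is the transform header
 * plus the original message size, otherwise the SMB2 header's NextCommand
 * value (zero when this is the last PDU).
 */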
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004324static int
4325smb2_next_header(char *buf)
4326{
4327 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4328 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4329
4330 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4331 return sizeof(struct smb2_transform_hdr) +
4332 le32_to_cpu(t_hdr->OriginalMessageSize);
4333
4334 return le32_to_cpu(hdr->NextCommand);
4335}
4336
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004337static int
4338smb2_make_node(unsigned int xid, struct inode *inode,
4339 struct dentry *dentry, struct cifs_tcon *tcon,
4340 char *full_path, umode_t mode, dev_t dev)
4341{
4342 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4343 int rc = -EPERM;
4344 int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
4345 FILE_ALL_INFO *buf = NULL;
4346 struct cifs_io_parms io_parms;
4347 __u32 oplock = 0;
4348 struct cifs_fid fid;
4349 struct cifs_open_parms oparms;
4350 unsigned int bytes_written;
4351 struct win_dev *pdev;
4352 struct kvec iov[2];
4353
4354	/*
4355	 * Check if mounted with the 'sfu' mount parm.
4356	 * SFU emulation should work with all servers, but only
4357	 * supports block and char devices (no socket & fifo),
4358	 * and was used by default in earlier versions of Windows.
4359	 */
4360 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
4361 goto out;
4362
4363 /*
4364 * TODO: Add ability to create instead via reparse point. Windows (e.g.
4365 * their current NFS server) uses this approach to expose special files
4366 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
4367 */
4368
4369 if (!S_ISCHR(mode) && !S_ISBLK(mode))
4370 goto out;
4371
4372 cifs_dbg(FYI, "sfu compat create special file\n");
4373
4374 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
4375 if (buf == NULL) {
4376 rc = -ENOMEM;
4377 goto out;
4378 }
4379
4380 if (backup_cred(cifs_sb))
4381 create_options |= CREATE_OPEN_BACKUP_INTENT;
4382
4383 oparms.tcon = tcon;
4384 oparms.cifs_sb = cifs_sb;
4385 oparms.desired_access = GENERIC_WRITE;
4386 oparms.create_options = create_options;
4387 oparms.disposition = FILE_CREATE;
4388 oparms.path = full_path;
4389 oparms.fid = &fid;
4390 oparms.reconnect = false;
4391
4392 if (tcon->ses->server->oplocks)
4393 oplock = REQ_OPLOCK;
4394 else
4395 oplock = 0;
4396 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
4397 if (rc)
4398 goto out;
4399
4400 /*
4401 * BB Do not bother to decode buf since no local inode yet to put
4402 * timestamps in, but we can reuse it safely.
4403 */
4404
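	/*
	 * Added note: SFU stores the device node in the first bytes of the
	 * file as a struct win_dev: an 8 byte type tag ("IntxCHR" or
	 * "IntxBLK") followed by little-endian 64 bit major and minor
	 * numbers, as written below.
	 */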
4405 pdev = (struct win_dev *)buf;
4406 io_parms.pid = current->tgid;
4407 io_parms.tcon = tcon;
4408 io_parms.offset = 0;
4409 io_parms.length = sizeof(struct win_dev);
4410 iov[1].iov_base = buf;
4411 iov[1].iov_len = sizeof(struct win_dev);
4412 if (S_ISCHR(mode)) {
4413 memcpy(pdev->type, "IntxCHR", 8);
4414 pdev->major = cpu_to_le64(MAJOR(dev));
4415 pdev->minor = cpu_to_le64(MINOR(dev));
4416 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4417 &bytes_written, iov, 1);
4418 } else if (S_ISBLK(mode)) {
4419 memcpy(pdev->type, "IntxBLK", 8);
4420 pdev->major = cpu_to_le64(MAJOR(dev));
4421 pdev->minor = cpu_to_le64(MINOR(dev));
4422 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4423 &bytes_written, iov, 1);
4424 }
4425 tcon->ses->server->ops->close(xid, tcon, &fid);
4426 d_drop(dentry);
4427
4428 /* FIXME: add code here to set EAs */
4429out:
4430 kfree(buf);
4431 return rc;
4432}
4433
4434
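/*
 * Added note: dispatch tables mapping the protocol-independent cifs
 * operations to their SMB2/SMB3 dialect-specific implementations. One
 * smb_version_operations and one smb_version_values structure is selected
 * per connection based on the dialect requested at mount time or negotiated
 * with the server.
 */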
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004435struct smb_version_operations smb20_operations = {
4436 .compare_fids = smb2_compare_fids,
4437 .setup_request = smb2_setup_request,
4438 .setup_async_request = smb2_setup_async_request,
4439 .check_receive = smb2_check_receive,
4440 .add_credits = smb2_add_credits,
4441 .set_credits = smb2_set_credits,
4442 .get_credits_field = smb2_get_credits_field,
4443 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004444 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004445 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004446 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004447 .read_data_offset = smb2_read_data_offset,
4448 .read_data_length = smb2_read_data_length,
4449 .map_error = map_smb2_to_linux_error,
4450 .find_mid = smb2_find_mid,
4451 .check_message = smb2_check_message,
4452 .dump_detail = smb2_dump_detail,
4453 .clear_stats = smb2_clear_stats,
4454 .print_stats = smb2_print_stats,
4455 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004456 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004457 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004458 .need_neg = smb2_need_neg,
4459 .negotiate = smb2_negotiate,
4460 .negotiate_wsize = smb2_negotiate_wsize,
4461 .negotiate_rsize = smb2_negotiate_rsize,
4462 .sess_setup = SMB2_sess_setup,
4463 .logoff = SMB2_logoff,
4464 .tree_connect = SMB2_tcon,
4465 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004466 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004467 .is_path_accessible = smb2_is_path_accessible,
4468 .can_echo = smb2_can_echo,
4469 .echo = SMB2_echo,
4470 .query_path_info = smb2_query_path_info,
4471 .get_srv_inum = smb2_get_srv_inum,
4472 .query_file_info = smb2_query_file_info,
4473 .set_path_size = smb2_set_path_size,
4474 .set_file_size = smb2_set_file_size,
4475 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004476 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004477 .mkdir = smb2_mkdir,
4478 .mkdir_setinfo = smb2_mkdir_setinfo,
4479 .rmdir = smb2_rmdir,
4480 .unlink = smb2_unlink,
4481 .rename = smb2_rename_path,
4482 .create_hardlink = smb2_create_hardlink,
4483 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01004484 .query_mf_symlink = smb3_query_mf_symlink,
4485 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004486 .open = smb2_open_file,
4487 .set_fid = smb2_set_fid,
4488 .close = smb2_close_file,
4489 .flush = smb2_flush_file,
4490 .async_readv = smb2_async_readv,
4491 .async_writev = smb2_async_writev,
4492 .sync_read = smb2_sync_read,
4493 .sync_write = smb2_sync_write,
4494 .query_dir_first = smb2_query_dir_first,
4495 .query_dir_next = smb2_query_dir_next,
4496 .close_dir = smb2_close_dir,
4497 .calc_smb_size = smb2_calc_size,
4498 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004499 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004500 .oplock_response = smb2_oplock_response,
4501 .queryfs = smb2_queryfs,
4502 .mand_lock = smb2_mand_lock,
4503 .mand_unlock_range = smb2_unlock_range,
4504 .push_mand_locks = smb2_push_mandatory_locks,
4505 .get_lease_key = smb2_get_lease_key,
4506 .set_lease_key = smb2_set_lease_key,
4507 .new_lease_key = smb2_new_lease_key,
4508 .calc_signature = smb2_calc_signature,
4509 .is_read_op = smb2_is_read_op,
4510 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004511 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004512 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004513 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004514 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004515 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004516 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304517 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004518#ifdef CONFIG_CIFS_XATTR
4519 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004520 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004521#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004522 .get_acl = get_smb2_acl,
4523 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004524 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004525 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004526 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004527 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004528 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004529 .llseek = smb3_llseek,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004530};
4531
Steve French1080ef72011-02-24 18:07:19 +00004532struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004533 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004534 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04004535 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004536 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04004537 .add_credits = smb2_add_credits,
4538 .set_credits = smb2_set_credits,
4539 .get_credits_field = smb2_get_credits_field,
4540 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004541 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004542 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004543 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004544 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004545 .read_data_offset = smb2_read_data_offset,
4546 .read_data_length = smb2_read_data_length,
4547 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004548 .find_mid = smb2_find_mid,
4549 .check_message = smb2_check_message,
4550 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04004551 .clear_stats = smb2_clear_stats,
4552 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004553 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004554 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004555 .downgrade_oplock = smb21_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04004556 .need_neg = smb2_need_neg,
4557 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07004558 .negotiate_wsize = smb2_negotiate_wsize,
4559 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04004560 .sess_setup = SMB2_sess_setup,
4561 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04004562 .tree_connect = SMB2_tcon,
4563 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004564 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04004565 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04004566 .can_echo = smb2_can_echo,
4567 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04004568 .query_path_info = smb2_query_path_info,
4569 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07004570 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004571 .set_path_size = smb2_set_path_size,
4572 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004573 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004574 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04004575 .mkdir = smb2_mkdir,
4576 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04004577 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07004578 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004579 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07004580 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004581 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004582 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004583 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07004584 .open = smb2_open_file,
4585 .set_fid = smb2_set_fid,
4586 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07004587 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004588 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07004589 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004590 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004591 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004592 .query_dir_first = smb2_query_dir_first,
4593 .query_dir_next = smb2_query_dir_next,
4594 .close_dir = smb2_close_dir,
4595 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07004596 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004597 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004598 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07004599 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07004600 .mand_lock = smb2_mand_lock,
4601 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07004602 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07004603 .get_lease_key = smb2_get_lease_key,
4604 .set_lease_key = smb2_set_lease_key,
4605 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06004606 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004607 .is_read_op = smb21_is_read_op,
4608 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004609 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004610 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004611 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004612 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004613 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05004614 .enum_snapshots = smb3_enum_snapshots,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004615 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304616 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004617#ifdef CONFIG_CIFS_XATTR
4618 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004619 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004620#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004621 .get_acl = get_smb2_acl,
4622 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004623 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004624 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004625 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004626 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004627 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004628 .llseek = smb3_llseek,
Steve French38107d42012-12-08 22:08:06 -06004629};
4630
Steve French38107d42012-12-08 22:08:06 -06004631struct smb_version_operations smb30_operations = {
4632 .compare_fids = smb2_compare_fids,
4633 .setup_request = smb2_setup_request,
4634 .setup_async_request = smb2_setup_async_request,
4635 .check_receive = smb2_check_receive,
4636 .add_credits = smb2_add_credits,
4637 .set_credits = smb2_set_credits,
4638 .get_credits_field = smb2_get_credits_field,
4639 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004640 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004641 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06004642 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004643 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06004644 .read_data_offset = smb2_read_data_offset,
4645 .read_data_length = smb2_read_data_length,
4646 .map_error = map_smb2_to_linux_error,
4647 .find_mid = smb2_find_mid,
4648 .check_message = smb2_check_message,
4649 .dump_detail = smb2_dump_detail,
4650 .clear_stats = smb2_clear_stats,
4651 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05004652 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06004653 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004654 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004655 .downgrade_oplock = smb21_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06004656 .need_neg = smb2_need_neg,
4657 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004658 .negotiate_wsize = smb3_negotiate_wsize,
4659 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06004660 .sess_setup = SMB2_sess_setup,
4661 .logoff = SMB2_logoff,
4662 .tree_connect = SMB2_tcon,
4663 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05004664 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06004665 .is_path_accessible = smb2_is_path_accessible,
4666 .can_echo = smb2_can_echo,
4667 .echo = SMB2_echo,
4668 .query_path_info = smb2_query_path_info,
4669 .get_srv_inum = smb2_get_srv_inum,
4670 .query_file_info = smb2_query_file_info,
4671 .set_path_size = smb2_set_path_size,
4672 .set_file_size = smb2_set_file_size,
4673 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004674 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06004675 .mkdir = smb2_mkdir,
4676 .mkdir_setinfo = smb2_mkdir_setinfo,
4677 .rmdir = smb2_rmdir,
4678 .unlink = smb2_unlink,
4679 .rename = smb2_rename_path,
4680 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004681 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004682 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004683 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06004684 .open = smb2_open_file,
4685 .set_fid = smb2_set_fid,
4686 .close = smb2_close_file,
4687 .flush = smb2_flush_file,
4688 .async_readv = smb2_async_readv,
4689 .async_writev = smb2_async_writev,
4690 .sync_read = smb2_sync_read,
4691 .sync_write = smb2_sync_write,
4692 .query_dir_first = smb2_query_dir_first,
4693 .query_dir_next = smb2_query_dir_next,
4694 .close_dir = smb2_close_dir,
4695 .calc_smb_size = smb2_calc_size,
4696 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004697 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06004698 .oplock_response = smb2_oplock_response,
4699 .queryfs = smb2_queryfs,
4700 .mand_lock = smb2_mand_lock,
4701 .mand_unlock_range = smb2_unlock_range,
4702 .push_mand_locks = smb2_push_mandatory_locks,
4703 .get_lease_key = smb2_get_lease_key,
4704 .set_lease_key = smb2_set_lease_key,
4705 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004706 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06004707 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004708 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004709 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004710 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004711 .create_lease_buf = smb3_create_lease_buf,
4712 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004713 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05004714 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06004715 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004716 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004717 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05004718 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004719 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004720 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004721 .is_transform_hdr = smb3_is_transform_hdr,
4722 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004723 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304724 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004725#ifdef CONFIG_CIFS_XATTR
4726 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004727 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004728#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004729 .get_acl = get_smb2_acl,
4730 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004731 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004732 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004733 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004734 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004735 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004736 .llseek = smb3_llseek,
Steve French1080ef72011-02-24 18:07:19 +00004737};
4738
Steve Frenchaab18932015-06-23 23:37:11 -05004739struct smb_version_operations smb311_operations = {
4740 .compare_fids = smb2_compare_fids,
4741 .setup_request = smb2_setup_request,
4742 .setup_async_request = smb2_setup_async_request,
4743 .check_receive = smb2_check_receive,
4744 .add_credits = smb2_add_credits,
4745 .set_credits = smb2_set_credits,
4746 .get_credits_field = smb2_get_credits_field,
4747 .get_credits = smb2_get_credits,
4748 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004749 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05004750 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004751 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05004752 .read_data_offset = smb2_read_data_offset,
4753 .read_data_length = smb2_read_data_length,
4754 .map_error = map_smb2_to_linux_error,
4755 .find_mid = smb2_find_mid,
4756 .check_message = smb2_check_message,
4757 .dump_detail = smb2_dump_detail,
4758 .clear_stats = smb2_clear_stats,
4759 .print_stats = smb2_print_stats,
4760 .dump_share_caps = smb2_dump_share_caps,
4761 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004762 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004763 .downgrade_oplock = smb21_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05004764 .need_neg = smb2_need_neg,
4765 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004766 .negotiate_wsize = smb3_negotiate_wsize,
4767 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05004768 .sess_setup = SMB2_sess_setup,
4769 .logoff = SMB2_logoff,
4770 .tree_connect = SMB2_tcon,
4771 .tree_disconnect = SMB2_tdis,
4772 .qfs_tcon = smb3_qfs_tcon,
4773 .is_path_accessible = smb2_is_path_accessible,
4774 .can_echo = smb2_can_echo,
4775 .echo = SMB2_echo,
4776 .query_path_info = smb2_query_path_info,
4777 .get_srv_inum = smb2_get_srv_inum,
4778 .query_file_info = smb2_query_file_info,
4779 .set_path_size = smb2_set_path_size,
4780 .set_file_size = smb2_set_file_size,
4781 .set_file_info = smb2_set_file_info,
4782 .set_compression = smb2_set_compression,
4783 .mkdir = smb2_mkdir,
4784 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05004785 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05004786 .rmdir = smb2_rmdir,
4787 .unlink = smb2_unlink,
4788 .rename = smb2_rename_path,
4789 .create_hardlink = smb2_create_hardlink,
4790 .query_symlink = smb2_query_symlink,
4791 .query_mf_symlink = smb3_query_mf_symlink,
4792 .create_mf_symlink = smb3_create_mf_symlink,
4793 .open = smb2_open_file,
4794 .set_fid = smb2_set_fid,
4795 .close = smb2_close_file,
4796 .flush = smb2_flush_file,
4797 .async_readv = smb2_async_readv,
4798 .async_writev = smb2_async_writev,
4799 .sync_read = smb2_sync_read,
4800 .sync_write = smb2_sync_write,
4801 .query_dir_first = smb2_query_dir_first,
4802 .query_dir_next = smb2_query_dir_next,
4803 .close_dir = smb2_close_dir,
4804 .calc_smb_size = smb2_calc_size,
4805 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004806 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05004807 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05004808 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05004809 .mand_lock = smb2_mand_lock,
4810 .mand_unlock_range = smb2_unlock_range,
4811 .push_mand_locks = smb2_push_mandatory_locks,
4812 .get_lease_key = smb2_get_lease_key,
4813 .set_lease_key = smb2_set_lease_key,
4814 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004815 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05004816 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004817 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05004818 .is_read_op = smb21_is_read_op,
4819 .set_oplock_level = smb3_set_oplock_level,
4820 .create_lease_buf = smb3_create_lease_buf,
4821 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004822 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07004823 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05004824/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
4825 .wp_retry_size = smb2_wp_retry_size,
4826 .dir_needs_close = smb2_dir_needs_close,
4827 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004828 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004829 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004830 .is_transform_hdr = smb3_is_transform_hdr,
4831 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004832 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304833 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004834#ifdef CONFIG_CIFS_XATTR
4835 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004836 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004837#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10004838 .get_acl = get_smb2_acl,
4839 .get_acl_by_fid = get_smb2_acl_by_fid,
4840 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004841 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004842 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004843 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004844 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004845 .llseek = smb3_llseek,
Steve Frenchaab18932015-06-23 23:37:11 -05004846};
Steve Frenchaab18932015-06-23 23:37:11 -05004847
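/*
 * Added note: per-dialect constants used alongside the operations tables
 * above: protocol identifiers, capability bits sent in negotiate, header
 * sizes, lock flag definitions and signing requirements.
 */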
Steve Frenchdd446b12012-11-28 23:21:06 -06004848struct smb_version_values smb20_values = {
4849 .version_string = SMB20_VERSION_STRING,
4850 .protocol_id = SMB20_PROT_ID,
4851 .req_capabilities = 0, /* MBZ */
4852 .large_lock_type = 0,
4853 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4854 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4855 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004856 .header_size = sizeof(struct smb2_sync_hdr),
4857 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06004858 .max_header_size = MAX_SMB2_HDR_SIZE,
4859 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4860 .lock_cmd = SMB2_LOCK,
4861 .cap_unix = 0,
4862 .cap_nt_find = SMB2_NT_FIND,
4863 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004864 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4865 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004866 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06004867};
4868
Steve French1080ef72011-02-24 18:07:19 +00004869struct smb_version_values smb21_values = {
4870 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004871 .protocol_id = SMB21_PROT_ID,
4872 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
4873 .large_lock_type = 0,
4874 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4875 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4876 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004877 .header_size = sizeof(struct smb2_sync_hdr),
4878 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004879 .max_header_size = MAX_SMB2_HDR_SIZE,
4880 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4881 .lock_cmd = SMB2_LOCK,
4882 .cap_unix = 0,
4883 .cap_nt_find = SMB2_NT_FIND,
4884 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004885 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4886 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004887 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05004888};
4889
Steve French9764c022017-09-17 10:41:35 -05004890struct smb_version_values smb3any_values = {
4891 .version_string = SMB3ANY_VERSION_STRING,
4892 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004893 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004894 .large_lock_type = 0,
4895 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4896 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4897 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004898 .header_size = sizeof(struct smb2_sync_hdr),
4899 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004900 .max_header_size = MAX_SMB2_HDR_SIZE,
4901 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4902 .lock_cmd = SMB2_LOCK,
4903 .cap_unix = 0,
4904 .cap_nt_find = SMB2_NT_FIND,
4905 .cap_large_files = SMB2_LARGE_FILES,
4906 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4907 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4908 .create_lease_size = sizeof(struct create_lease_v2),
4909};
4910
4911struct smb_version_values smbdefault_values = {
4912 .version_string = SMBDEFAULT_VERSION_STRING,
4913 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004914 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004915 .large_lock_type = 0,
4916 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4917 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4918 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004919 .header_size = sizeof(struct smb2_sync_hdr),
4920 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004921 .max_header_size = MAX_SMB2_HDR_SIZE,
4922 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4923 .lock_cmd = SMB2_LOCK,
4924 .cap_unix = 0,
4925 .cap_nt_find = SMB2_NT_FIND,
4926 .cap_large_files = SMB2_LARGE_FILES,
4927 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4928 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4929 .create_lease_size = sizeof(struct create_lease_v2),
4930};
4931
Steve Frenche4aa25e2012-10-01 12:26:22 -05004932struct smb_version_values smb30_values = {
4933 .version_string = SMB30_VERSION_STRING,
4934 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004935 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004936 .large_lock_type = 0,
4937 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4938 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4939 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004940 .header_size = sizeof(struct smb2_sync_hdr),
4941 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004942 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004943 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004944 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004945 .cap_unix = 0,
4946 .cap_nt_find = SMB2_NT_FIND,
4947 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004948 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4949 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004950 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00004951};
Steve French20b6d8b2013-06-12 22:48:41 -05004952
4953struct smb_version_values smb302_values = {
4954 .version_string = SMB302_VERSION_STRING,
4955 .protocol_id = SMB302_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004956 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French20b6d8b2013-06-12 22:48:41 -05004957 .large_lock_type = 0,
4958 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4959 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4960 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004961 .header_size = sizeof(struct smb2_sync_hdr),
4962 .header_preamble_size = 0,
Steve French20b6d8b2013-06-12 22:48:41 -05004963 .max_header_size = MAX_SMB2_HDR_SIZE,
4964 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4965 .lock_cmd = SMB2_LOCK,
4966 .cap_unix = 0,
4967 .cap_nt_find = SMB2_NT_FIND,
4968 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004969 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4970 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004971 .create_lease_size = sizeof(struct create_lease_v2),
Steve French20b6d8b2013-06-12 22:48:41 -05004972};
Steve French5f7fbf72014-12-17 22:52:58 -06004973
Steve French5f7fbf72014-12-17 22:52:58 -06004974struct smb_version_values smb311_values = {
4975 .version_string = SMB311_VERSION_STRING,
4976 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004977 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06004978 .large_lock_type = 0,
4979 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4980 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4981 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004982 .header_size = sizeof(struct smb2_sync_hdr),
4983 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06004984 .max_header_size = MAX_SMB2_HDR_SIZE,
4985 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4986 .lock_cmd = SMB2_LOCK,
4987 .cap_unix = 0,
4988 .cap_nt_find = SMB2_NT_FIND,
4989 .cap_large_files = SMB2_LARGE_FILES,
4990 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4991 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4992 .create_lease_size = sizeof(struct create_lease_v2),
4993};