blob: eaed180613143652c1f6ef21049da0056790b41b [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070013#include <crypto/aead.h>
Steve French1080ef72011-02-24 18:07:19 +000014#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040015#include "smb2pdu.h"
16#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040017#include "cifsproto.h"
18#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040019#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070020#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070021#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050022#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070023#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040024
Pavel Shilovskyef68e832019-01-18 17:25:36 -080025/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040026static int
27change_conf(struct TCP_Server_Info *server)
28{
29 server->credits += server->echo_credits + server->oplock_credits;
30 server->oplock_credits = server->echo_credits = 0;
31 switch (server->credits) {
32 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080033 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040034 case 1:
35 server->echoes = false;
36 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040037 break;
38 case 2:
39 server->echoes = true;
40 server->oplocks = false;
41 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040042 break;
43 default:
44 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050045 if (enable_oplocks) {
46 server->oplocks = true;
47 server->oplock_credits = 1;
48 } else
49 server->oplocks = false;
50
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040051 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040052 }
53 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080054 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040055}
56
57static void
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080058smb2_add_credits(struct TCP_Server_Info *server,
59 const struct cifs_credits *credits, const int optype)
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040060{
Pavel Shilovskyef68e832019-01-18 17:25:36 -080061 int *val, rc = -1;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080062 unsigned int add = credits->value;
63 unsigned int instance = credits->instance;
64 bool reconnect_detected = false;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080065
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040066 spin_lock(&server->req_lock);
67 val = server->ops->get_credits_field(server, optype);
Steve Frenchb340a4d2018-09-01 01:10:17 -050068
69 /* eg found case where write overlapping reconnect messed up credits */
70 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
71 trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
72 server->hostname, *val);
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080073 if ((instance == 0) || (instance == server->reconnect_instance))
74 *val += add;
75 else
76 reconnect_detected = true;
Steve Frenchb340a4d2018-09-01 01:10:17 -050077
Steve French141891f2016-09-23 00:44:16 -050078 if (*val > 65000) {
79 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
80 printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
81 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040082 server->in_flight--;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +040083 if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040084 rc = change_conf(server);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -070085 /*
86 * Sometimes server returns 0 credits on oplock break ack - we need to
87 * rebalance credits in this case.
88 */
89 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
90 server->oplocks) {
91 if (server->credits > 1) {
92 server->credits--;
93 server->oplock_credits++;
94 }
95 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040096 spin_unlock(&server->req_lock);
97 wake_up(&server->request_q);
Pavel Shilovskyef68e832019-01-18 17:25:36 -080098
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080099 if (reconnect_detected)
100 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
101 add, instance);
102
Pavel Shilovsky82e04572019-01-25 10:56:41 -0800103 if (server->tcpStatus == CifsNeedReconnect
104 || server->tcpStatus == CifsExiting)
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800105 return;
106
107 switch (rc) {
108 case -1:
109 /* change_conf hasn't been executed */
110 break;
111 case 0:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000112 cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800113 break;
114 case 1:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000115 cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800116 break;
117 case 2:
118 cifs_dbg(FYI, "disabling oplocks\n");
119 break;
120 default:
121 cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
122 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400123}
124
125static void
126smb2_set_credits(struct TCP_Server_Info *server, const int val)
127{
128 spin_lock(&server->req_lock);
129 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500130 if (val == 1)
131 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400132 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500133 /* don't log while holding the lock */
134 if (val == 1)
135 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400136}
137
138static int *
139smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
140{
141 switch (optype) {
142 case CIFS_ECHO_OP:
143 return &server->echo_credits;
144 case CIFS_OBREAK_OP:
145 return &server->oplock_credits;
146 default:
147 return &server->credits;
148 }
149}
150
151static unsigned int
152smb2_get_credits(struct mid_q_entry *mid)
153{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000154 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700155
Pavel Shilovsky3d3003f2019-01-22 16:50:21 -0800156 if (mid->mid_state == MID_RESPONSE_RECEIVED
157 || mid->mid_state == MID_RESPONSE_MALFORMED)
158 return le16_to_cpu(shdr->CreditRequest);
159
160 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400161}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400162
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400163static int
164smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800165 unsigned int *num, struct cifs_credits *credits)
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400166{
167 int rc = 0;
168 unsigned int scredits;
169
170 spin_lock(&server->req_lock);
171 while (1) {
172 if (server->credits <= 0) {
173 spin_unlock(&server->req_lock);
174 cifs_num_waiters_inc(server);
175 rc = wait_event_killable(server->request_q,
Ronnie Sahlbergb227d212019-03-08 12:58:20 +1000176 has_credits(server, &server->credits, 1));
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400177 cifs_num_waiters_dec(server);
178 if (rc)
179 return rc;
180 spin_lock(&server->req_lock);
181 } else {
182 if (server->tcpStatus == CifsExiting) {
183 spin_unlock(&server->req_lock);
184 return -ENOENT;
185 }
186
187 scredits = server->credits;
188 /* can deadlock with reopen */
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800189 if (scredits <= 8) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400190 *num = SMB2_MAX_BUFFER_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800191 credits->value = 0;
192 credits->instance = 0;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400193 break;
194 }
195
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800196 /* leave some credits for reopen and other ops */
197 scredits -= 8;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400198 *num = min_t(unsigned int, size,
199 scredits * SMB2_MAX_BUFFER_SIZE);
200
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800201 credits->value =
202 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
203 credits->instance = server->reconnect_instance;
204 server->credits -= credits->value;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400205 server->in_flight++;
Steve French1b63f182019-09-09 22:57:11 -0500206 if (server->in_flight > server->max_in_flight)
207 server->max_in_flight = server->in_flight;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400208 break;
209 }
210 }
211 spin_unlock(&server->req_lock);
212 return rc;
213}
214
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800215static int
216smb2_adjust_credits(struct TCP_Server_Info *server,
217 struct cifs_credits *credits,
218 const unsigned int payload_size)
219{
220 int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
221
222 if (!credits->value || credits->value == new_val)
223 return 0;
224
225 if (credits->value < new_val) {
226 WARN_ONCE(1, "request has less credits (%d) than required (%d)",
227 credits->value, new_val);
228 return -ENOTSUPP;
229 }
230
231 spin_lock(&server->req_lock);
232
233 if (server->reconnect_instance != credits->instance) {
234 spin_unlock(&server->req_lock);
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000235 cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800236 credits->value - new_val);
237 return -EAGAIN;
238 }
239
240 server->credits += credits->value - new_val;
241 spin_unlock(&server->req_lock);
242 wake_up(&server->request_q);
243 credits->value = new_val;
244 return 0;
245}
246
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400247static __u64
248smb2_get_next_mid(struct TCP_Server_Info *server)
249{
250 __u64 mid;
251 /* for SMB2 we need the current value */
252 spin_lock(&GlobalMid_Lock);
253 mid = server->CurrentMid++;
254 spin_unlock(&GlobalMid_Lock);
255 return mid;
256}
Steve French1080ef72011-02-24 18:07:19 +0000257
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800258static void
259smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
260{
261 spin_lock(&GlobalMid_Lock);
262 if (server->CurrentMid >= val)
263 server->CurrentMid -= val;
264 spin_unlock(&GlobalMid_Lock);
265}
266
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400267static struct mid_q_entry *
268smb2_find_mid(struct TCP_Server_Info *server, char *buf)
269{
270 struct mid_q_entry *mid;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000271 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700272 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400273
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700274 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000275 cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
Steve French373512e2015-12-18 13:05:30 -0600276 return NULL;
277 }
278
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400279 spin_lock(&GlobalMid_Lock);
280 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
Sachin Prabhu9235d092014-12-09 17:37:00 +0000281 if ((mid->mid == wire_mid) &&
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400282 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700283 (mid->command == shdr->Command)) {
Lars Persson696e4202018-06-25 14:05:25 +0200284 kref_get(&mid->refcount);
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400285 spin_unlock(&GlobalMid_Lock);
286 return mid;
287 }
288 }
289 spin_unlock(&GlobalMid_Lock);
290 return NULL;
291}
292
293static void
Ronnie Sahlberg14547f72018-04-22 14:45:53 -0600294smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400295{
296#ifdef CONFIG_CIFS_DEBUG2
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000297 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400298
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000299 cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700300 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
301 shdr->ProcessId);
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000302 cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
Steve French71992e622018-05-06 15:58:51 -0500303 server->ops->calc_smb_size(buf, server));
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400304#endif
305}
306
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400307static bool
308smb2_need_neg(struct TCP_Server_Info *server)
309{
310 return server->max_read == 0;
311}
312
313static int
314smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
315{
316 int rc;
Christoph Probsta205d502019-05-08 21:36:25 +0200317
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400318 ses->server->CurrentMid = 0;
319 rc = SMB2_negotiate(xid, ses);
320 /* BB we probably don't need to retry with modern servers */
321 if (rc == -EAGAIN)
322 rc = -EHOSTDOWN;
323 return rc;
324}
325
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700326static unsigned int
327smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
328{
329 struct TCP_Server_Info *server = tcon->ses->server;
330 unsigned int wsize;
331
332 /* start with specified wsize, or default */
333 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
334 wsize = min_t(unsigned int, wsize, server->max_write);
Long Li09902f82017-11-22 17:38:39 -0700335#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700336 if (server->rdma) {
337 if (server->sign)
338 wsize = min_t(unsigned int,
339 wsize, server->smbd_conn->max_fragmented_send_size);
340 else
341 wsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700342 wsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700343 }
Long Li09902f82017-11-22 17:38:39 -0700344#endif
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400345 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
346 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700347
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700348 return wsize;
349}
350
351static unsigned int
Steve French3d621232018-09-25 15:33:47 -0500352smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
353{
354 struct TCP_Server_Info *server = tcon->ses->server;
355 unsigned int wsize;
356
357 /* start with specified wsize, or default */
358 wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
359 wsize = min_t(unsigned int, wsize, server->max_write);
360#ifdef CONFIG_CIFS_SMB_DIRECT
361 if (server->rdma) {
362 if (server->sign)
363 wsize = min_t(unsigned int,
364 wsize, server->smbd_conn->max_fragmented_send_size);
365 else
366 wsize = min_t(unsigned int,
367 wsize, server->smbd_conn->max_readwrite_size);
368 }
369#endif
370 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
371 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
372
373 return wsize;
374}
375
376static unsigned int
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700377smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
378{
379 struct TCP_Server_Info *server = tcon->ses->server;
380 unsigned int rsize;
381
382 /* start with specified rsize, or default */
383 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
384 rsize = min_t(unsigned int, rsize, server->max_read);
Long Li09902f82017-11-22 17:38:39 -0700385#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700386 if (server->rdma) {
387 if (server->sign)
388 rsize = min_t(unsigned int,
389 rsize, server->smbd_conn->max_fragmented_recv_size);
390 else
391 rsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700392 rsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700393 }
Long Li09902f82017-11-22 17:38:39 -0700394#endif
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400395
396 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
397 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700398
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700399 return rsize;
400}
401
Steve French3d621232018-09-25 15:33:47 -0500402static unsigned int
403smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
404{
405 struct TCP_Server_Info *server = tcon->ses->server;
406 unsigned int rsize;
407
408 /* start with specified rsize, or default */
409 rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
410 rsize = min_t(unsigned int, rsize, server->max_read);
411#ifdef CONFIG_CIFS_SMB_DIRECT
412 if (server->rdma) {
413 if (server->sign)
414 rsize = min_t(unsigned int,
415 rsize, server->smbd_conn->max_fragmented_recv_size);
416 else
417 rsize = min_t(unsigned int,
418 rsize, server->smbd_conn->max_readwrite_size);
419 }
420#endif
421
422 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
423 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
424
425 return rsize;
426}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200427
428static int
429parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
430 size_t buf_len,
431 struct cifs_server_iface **iface_list,
432 size_t *iface_count)
433{
434 struct network_interface_info_ioctl_rsp *p;
435 struct sockaddr_in *addr4;
436 struct sockaddr_in6 *addr6;
437 struct iface_info_ipv4 *p4;
438 struct iface_info_ipv6 *p6;
439 struct cifs_server_iface *info;
440 ssize_t bytes_left;
441 size_t next = 0;
442 int nb_iface = 0;
443 int rc = 0;
444
445 *iface_list = NULL;
446 *iface_count = 0;
447
448 /*
449 * Fist pass: count and sanity check
450 */
451
452 bytes_left = buf_len;
453 p = buf;
454 while (bytes_left >= sizeof(*p)) {
455 nb_iface++;
456 next = le32_to_cpu(p->Next);
457 if (!next) {
458 bytes_left -= sizeof(*p);
459 break;
460 }
461 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
462 bytes_left -= next;
463 }
464
465 if (!nb_iface) {
466 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
467 rc = -EINVAL;
468 goto out;
469 }
470
471 if (bytes_left || p->Next)
472 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
473
474
475 /*
476 * Second pass: extract info to internal structure
477 */
478
479 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
480 if (!*iface_list) {
481 rc = -ENOMEM;
482 goto out;
483 }
484
485 info = *iface_list;
486 bytes_left = buf_len;
487 p = buf;
488 while (bytes_left >= sizeof(*p)) {
489 info->speed = le64_to_cpu(p->LinkSpeed);
490 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
491 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
492
493 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
494 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
495 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
496 le32_to_cpu(p->Capability));
497
498 switch (p->Family) {
499 /*
500 * The kernel and wire socket structures have the same
501 * layout and use network byte order but make the
502 * conversion explicit in case either one changes.
503 */
504 case INTERNETWORK:
505 addr4 = (struct sockaddr_in *)&info->sockaddr;
506 p4 = (struct iface_info_ipv4 *)p->Buffer;
507 addr4->sin_family = AF_INET;
508 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
509
510 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
511 addr4->sin_port = cpu_to_be16(CIFS_PORT);
512
513 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
514 &addr4->sin_addr);
515 break;
516 case INTERNETWORKV6:
517 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
518 p6 = (struct iface_info_ipv6 *)p->Buffer;
519 addr6->sin6_family = AF_INET6;
520 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
521
522 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
523 addr6->sin6_flowinfo = 0;
524 addr6->sin6_scope_id = 0;
525 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
526
527 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
528 &addr6->sin6_addr);
529 break;
530 default:
531 cifs_dbg(VFS,
532 "%s: skipping unsupported socket family\n",
533 __func__);
534 goto next_iface;
535 }
536
537 (*iface_count)++;
538 info++;
539next_iface:
540 next = le32_to_cpu(p->Next);
541 if (!next)
542 break;
543 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
544 bytes_left -= next;
545 }
546
547 if (!*iface_count) {
548 rc = -EINVAL;
549 goto out;
550 }
551
552out:
553 if (rc) {
554 kfree(*iface_list);
555 *iface_count = 0;
556 *iface_list = NULL;
557 }
558 return rc;
559}
560
561
Steve Frenchc481e9f2013-10-14 01:21:53 -0500562static int
563SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
564{
565 int rc;
566 unsigned int ret_data_len = 0;
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200567 struct network_interface_info_ioctl_rsp *out_buf = NULL;
568 struct cifs_server_iface *iface_list;
569 size_t iface_count;
570 struct cifs_ses *ses = tcon->ses;
Steve Frenchc481e9f2013-10-14 01:21:53 -0500571
572 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
573 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
574 NULL /* no data input */, 0 /* no data input */,
Steve French153322f2019-03-28 22:32:49 -0500575 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
Steve Frenchc3ed4402018-06-28 22:53:39 -0500576 if (rc == -EOPNOTSUPP) {
577 cifs_dbg(FYI,
578 "server does not support query network interfaces\n");
579 goto out;
580 } else if (rc != 0) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000581 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200582 goto out;
Steve French9ffc5412014-10-16 15:13:14 -0500583 }
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200584
585 rc = parse_server_interfaces(out_buf, ret_data_len,
586 &iface_list, &iface_count);
587 if (rc)
588 goto out;
589
590 spin_lock(&ses->iface_lock);
591 kfree(ses->iface_list);
592 ses->iface_list = iface_list;
593 ses->iface_count = iface_count;
594 ses->iface_last_update = jiffies;
595 spin_unlock(&ses->iface_lock);
596
597out:
Steve French24df1482016-09-29 04:20:23 -0500598 kfree(out_buf);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500599 return rc;
600}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500601
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000602static void
603smb2_close_cached_fid(struct kref *ref)
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000604{
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000605 struct cached_fid *cfid = container_of(ref, struct cached_fid,
606 refcount);
607
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000608 if (cfid->is_valid) {
609 cifs_dbg(FYI, "clear cached root file handle\n");
610 SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
611 cfid->fid->volatile_fid);
612 cfid->is_valid = false;
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000613 cfid->file_all_info_is_valid = false;
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000614 }
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000615}
616
617void close_shroot(struct cached_fid *cfid)
618{
619 mutex_lock(&cfid->fid_mutex);
620 kref_put(&cfid->refcount, smb2_close_cached_fid);
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000621 mutex_unlock(&cfid->fid_mutex);
622}
623
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000624void
625smb2_cached_lease_break(struct work_struct *work)
626{
627 struct cached_fid *cfid = container_of(work,
628 struct cached_fid, lease_break);
629
630 close_shroot(cfid);
631}
632
Steve French3d4ef9a2018-04-25 22:19:09 -0500633/*
634 * Open the directory at the root of a share
635 */
636int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
637{
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000638 struct cifs_ses *ses = tcon->ses;
639 struct TCP_Server_Info *server = ses->server;
640 struct cifs_open_parms oparms;
641 struct smb2_create_rsp *o_rsp = NULL;
642 struct smb2_query_info_rsp *qi_rsp = NULL;
643 int resp_buftype[2];
644 struct smb_rqst rqst[2];
645 struct kvec rsp_iov[2];
646 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
647 struct kvec qi_iov[1];
648 int rc, flags = 0;
649 __le16 utf16_path = 0; /* Null - since an open of top of share */
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000650 u8 oplock = SMB2_OPLOCK_LEVEL_II;
Steve French3d4ef9a2018-04-25 22:19:09 -0500651
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000652 mutex_lock(&tcon->crfid.fid_mutex);
653 if (tcon->crfid.is_valid) {
Steve French3d4ef9a2018-04-25 22:19:09 -0500654 cifs_dbg(FYI, "found a cached root file handle\n");
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000655 memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000656 kref_get(&tcon->crfid.refcount);
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000657 mutex_unlock(&tcon->crfid.fid_mutex);
Steve French3d4ef9a2018-04-25 22:19:09 -0500658 return 0;
659 }
660
Steve French96d9f7e2019-09-12 17:52:54 -0500661 /*
662 * We do not hold the lock for the open because in case
663 * SMB2_open needs to reconnect, it will end up calling
664 * cifs_mark_open_files_invalid() which takes the lock again
665 * thus causing a deadlock
666 */
667
668 mutex_unlock(&tcon->crfid.fid_mutex);
669
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000670 if (smb3_encryption_required(tcon))
671 flags |= CIFS_TRANSFORM_REQ;
Steve French3d4ef9a2018-04-25 22:19:09 -0500672
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000673 memset(rqst, 0, sizeof(rqst));
674 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
675 memset(rsp_iov, 0, sizeof(rsp_iov));
676
677 /* Open */
678 memset(&open_iov, 0, sizeof(open_iov));
679 rqst[0].rq_iov = open_iov;
680 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
681
682 oparms.tcon = tcon;
683 oparms.create_options = 0;
684 oparms.desired_access = FILE_READ_ATTRIBUTES;
685 oparms.disposition = FILE_OPEN;
686 oparms.fid = pfid;
687 oparms.reconnect = false;
688
689 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
690 if (rc)
Steve French96d9f7e2019-09-12 17:52:54 -0500691 goto oshr_free;
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000692 smb2_set_next_command(tcon, &rqst[0]);
693
694 memset(&qi_iov, 0, sizeof(qi_iov));
695 rqst[1].rq_iov = qi_iov;
696 rqst[1].rq_nvec = 1;
697
698 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
699 COMPOUND_FID, FILE_ALL_INFORMATION,
700 SMB2_O_INFO_FILE, 0,
701 sizeof(struct smb2_file_all_info) +
702 PATH_MAX * 2, 0, NULL);
703 if (rc)
Steve French96d9f7e2019-09-12 17:52:54 -0500704 goto oshr_free;
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000705
706 smb2_set_related(&rqst[1]);
707
708 rc = compound_send_recv(xid, ses, flags, 2, rqst,
709 resp_buftype, rsp_iov);
Aurelien Aptel7e5a70a2019-07-17 12:46:28 +0200710 mutex_lock(&tcon->crfid.fid_mutex);
711
712 /*
713 * Now we need to check again as the cached root might have
714 * been successfully re-opened from a concurrent process
715 */
716
717 if (tcon->crfid.is_valid) {
718 /* work was already done */
719
720 /* stash fids for close() later */
721 struct cifs_fid fid = {
722 .persistent_fid = pfid->persistent_fid,
723 .volatile_fid = pfid->volatile_fid,
724 };
725
726 /*
727 * caller expects this func to set pfid to a valid
728 * cached root, so we copy the existing one and get a
729 * reference.
730 */
731 memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
732 kref_get(&tcon->crfid.refcount);
733
734 mutex_unlock(&tcon->crfid.fid_mutex);
735
736 if (rc == 0) {
737 /* close extra handle outside of crit sec */
738 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
739 }
740 goto oshr_free;
741 }
742
743 /* Cached root is still invalid, continue normaly */
744
Steve French7dcc82c2019-09-11 00:07:36 -0500745 if (rc) {
746 if (rc == -EREMCHG) {
747 tcon->need_reconnect = true;
748 printk_once(KERN_WARNING "server share %s deleted\n",
749 tcon->treeName);
750 }
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000751 goto oshr_exit;
Steve French7dcc82c2019-09-11 00:07:36 -0500752 }
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000753
754 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
755 oparms.fid->persistent_fid = o_rsp->PersistentFileId;
756 oparms.fid->volatile_fid = o_rsp->VolatileFileId;
757#ifdef CONFIG_CIFS_DEBUG2
758 oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
759#endif /* CIFS_DEBUG2 */
760
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000761 memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
762 tcon->crfid.tcon = tcon;
763 tcon->crfid.is_valid = true;
764 kref_init(&tcon->crfid.refcount);
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000765
Steve French89a5bfa2019-07-18 17:22:18 -0500766 /* BB TBD check to see if oplock level check can be removed below */
Ronnie Sahlberg2f94a3122019-03-28 11:20:02 +1000767 if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
768 kref_get(&tcon->crfid.refcount);
Steve French89a5bfa2019-07-18 17:22:18 -0500769 smb2_parse_contexts(server, o_rsp,
770 &oparms.fid->epoch,
771 oparms.fid->lease_key, &oplock, NULL);
Ronnie Sahlberg2f94a3122019-03-28 11:20:02 +1000772 } else
773 goto oshr_exit;
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000774
775 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
776 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
777 goto oshr_exit;
Ronnie Sahlberg4811e302019-04-01 09:53:44 +1000778 if (!smb2_validate_and_copy_iov(
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000779 le16_to_cpu(qi_rsp->OutputBufferOffset),
780 sizeof(struct smb2_file_all_info),
781 &rsp_iov[1], sizeof(struct smb2_file_all_info),
Ronnie Sahlberg4811e302019-04-01 09:53:44 +1000782 (char *)&tcon->crfid.file_all_info))
783 tcon->crfid.file_all_info_is_valid = 1;
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000784
Aurelien Aptel7e5a70a2019-07-17 12:46:28 +0200785oshr_exit:
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000786 mutex_unlock(&tcon->crfid.fid_mutex);
Aurelien Aptel7e5a70a2019-07-17 12:46:28 +0200787oshr_free:
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000788 SMB2_open_free(&rqst[0]);
789 SMB2_query_info_free(&rqst[1]);
790 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
791 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Steve French3d4ef9a2018-04-25 22:19:09 -0500792 return rc;
793}
794
Steve French34f62642013-10-09 02:07:00 -0500795static void
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500796smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
797{
798 int rc;
799 __le16 srch_path = 0; /* Null - open root of share */
800 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
801 struct cifs_open_parms oparms;
802 struct cifs_fid fid;
Steve French3d4ef9a2018-04-25 22:19:09 -0500803 bool no_cached_open = tcon->nohandlecache;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500804
805 oparms.tcon = tcon;
806 oparms.desired_access = FILE_READ_ATTRIBUTES;
807 oparms.disposition = FILE_OPEN;
808 oparms.create_options = 0;
809 oparms.fid = &fid;
810 oparms.reconnect = false;
811
Steve French3d4ef9a2018-04-25 22:19:09 -0500812 if (no_cached_open)
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000813 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
814 NULL);
Steve French3d4ef9a2018-04-25 22:19:09 -0500815 else
816 rc = open_shroot(xid, tcon, &fid);
817
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500818 if (rc)
819 return;
820
Steve Frenchc481e9f2013-10-14 01:21:53 -0500821 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500822
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500823 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
824 FS_ATTRIBUTE_INFORMATION);
825 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
826 FS_DEVICE_INFORMATION);
827 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500828 FS_VOLUME_INFORMATION);
829 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500830 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Steve French3d4ef9a2018-04-25 22:19:09 -0500831 if (no_cached_open)
832 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000833 else
834 close_shroot(&tcon->crfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500835}
836
837static void
Steve French34f62642013-10-09 02:07:00 -0500838smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
839{
840 int rc;
841 __le16 srch_path = 0; /* Null - open root of share */
842 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
843 struct cifs_open_parms oparms;
844 struct cifs_fid fid;
845
846 oparms.tcon = tcon;
847 oparms.desired_access = FILE_READ_ATTRIBUTES;
848 oparms.disposition = FILE_OPEN;
849 oparms.create_options = 0;
850 oparms.fid = &fid;
851 oparms.reconnect = false;
852
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000853 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500854 if (rc)
855 return;
856
Steven French21671142013-10-09 13:36:35 -0500857 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
858 FS_ATTRIBUTE_INFORMATION);
859 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
860 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500861 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -0500862}
863
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400864static int
865smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
866 struct cifs_sb_info *cifs_sb, const char *full_path)
867{
868 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400869 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700870 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400871 struct cifs_open_parms oparms;
872 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400873
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000874 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -0500875 return 0;
876
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400877 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
878 if (!utf16_path)
879 return -ENOMEM;
880
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400881 oparms.tcon = tcon;
882 oparms.desired_access = FILE_READ_ATTRIBUTES;
883 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -0500884 if (backup_cred(cifs_sb))
885 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
886 else
887 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400888 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400889 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400890
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000891 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400892 if (rc) {
893 kfree(utf16_path);
894 return rc;
895 }
896
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400897 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400898 kfree(utf16_path);
899 return rc;
900}
901
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +0400902static int
903smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
904 struct cifs_sb_info *cifs_sb, const char *full_path,
905 u64 *uniqueid, FILE_ALL_INFO *data)
906{
907 *uniqueid = le64_to_cpu(data->IndexNumber);
908 return 0;
909}
910
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700911static int
912smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
913 struct cifs_fid *fid, FILE_ALL_INFO *data)
914{
915 int rc;
916 struct smb2_file_all_info *smb2_data;
917
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +0400918 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700919 GFP_KERNEL);
920 if (smb2_data == NULL)
921 return -ENOMEM;
922
923 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
924 smb2_data);
925 if (!rc)
926 move_smb2_info_to_cifs(data, smb2_data);
927 kfree(smb2_data);
928 return rc;
929}
930
Arnd Bergmann1368f152017-09-05 11:24:15 +0200931#ifdef CONFIG_CIFS_XATTR
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000932static ssize_t
933move_smb2_ea_to_cifs(char *dst, size_t dst_size,
934 struct smb2_file_full_ea_info *src, size_t src_size,
935 const unsigned char *ea_name)
936{
937 int rc = 0;
938 unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
939 char *name, *value;
Ronnie Sahlberg0c5d6cb2018-10-25 15:43:36 +1000940 size_t buf_size = dst_size;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000941 size_t name_len, value_len, user_name_len;
942
943 while (src_size > 0) {
944 name = &src->ea_data[0];
945 name_len = (size_t)src->ea_name_length;
946 value = &src->ea_data[src->ea_name_length + 1];
947 value_len = (size_t)le16_to_cpu(src->ea_value_length);
948
Christoph Probsta205d502019-05-08 21:36:25 +0200949 if (name_len == 0)
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000950 break;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000951
952 if (src_size < 8 + name_len + 1 + value_len) {
953 cifs_dbg(FYI, "EA entry goes beyond length of list\n");
954 rc = -EIO;
955 goto out;
956 }
957
958 if (ea_name) {
959 if (ea_name_len == name_len &&
960 memcmp(ea_name, name, name_len) == 0) {
961 rc = value_len;
962 if (dst_size == 0)
963 goto out;
964 if (dst_size < value_len) {
965 rc = -ERANGE;
966 goto out;
967 }
968 memcpy(dst, value, value_len);
969 goto out;
970 }
971 } else {
972 /* 'user.' plus a terminating null */
973 user_name_len = 5 + 1 + name_len;
974
Ronnie Sahlberg0c5d6cb2018-10-25 15:43:36 +1000975 if (buf_size == 0) {
976 /* skip copy - calc size only */
977 rc += user_name_len;
978 } else if (dst_size >= user_name_len) {
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000979 dst_size -= user_name_len;
980 memcpy(dst, "user.", 5);
981 dst += 5;
982 memcpy(dst, src->ea_data, name_len);
983 dst += name_len;
984 *dst = 0;
985 ++dst;
Ronnie Sahlberg0c5d6cb2018-10-25 15:43:36 +1000986 rc += user_name_len;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +1000987 } else {
988 /* stop before overrun buffer */
989 rc = -ERANGE;
990 break;
991 }
992 }
993
994 if (!src->next_entry_offset)
995 break;
996
997 if (src_size < le32_to_cpu(src->next_entry_offset)) {
998 /* stop before overrun buffer */
999 rc = -ERANGE;
1000 break;
1001 }
1002 src_size -= le32_to_cpu(src->next_entry_offset);
1003 src = (void *)((char *)src +
1004 le32_to_cpu(src->next_entry_offset));
1005 }
1006
1007 /* didn't find the named attribute */
1008 if (ea_name)
1009 rc = -ENODATA;
1010
1011out:
1012 return (ssize_t)rc;
1013}
1014
1015static ssize_t
1016smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
1017 const unsigned char *path, const unsigned char *ea_name,
1018 char *ea_data, size_t buf_size,
1019 struct cifs_sb_info *cifs_sb)
1020{
1021 int rc;
1022 __le16 *utf16_path;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001023 struct kvec rsp_iov = {NULL, 0};
1024 int buftype = CIFS_NO_BUFFER;
1025 struct smb2_query_info_rsp *rsp;
1026 struct smb2_file_full_ea_info *info = NULL;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001027
1028 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1029 if (!utf16_path)
1030 return -ENOMEM;
1031
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001032 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1033 FILE_READ_EA,
1034 FILE_FULL_EA_INFORMATION,
1035 SMB2_O_INFO_FILE,
Ronnie Sahlbergc4627e62019-01-29 12:46:17 +10001036 CIFSMaxBufSize -
1037 MAX_SMB2_CREATE_RESPONSE_SIZE -
1038 MAX_SMB2_CLOSE_RESPONSE_SIZE,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001039 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001040 if (rc) {
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001041 /*
1042 * If ea_name is NULL (listxattr) and there are no EAs,
1043 * return 0 as it's not an error. Otherwise, the specified
1044 * ea_name was not found.
1045 */
1046 if (!ea_name && rc == -ENODATA)
1047 rc = 0;
1048 goto qeas_exit;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001049 }
1050
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001051 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
1052 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1053 le32_to_cpu(rsp->OutputBufferLength),
1054 &rsp_iov,
1055 sizeof(struct smb2_file_full_ea_info));
1056 if (rc)
1057 goto qeas_exit;
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001058
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001059 info = (struct smb2_file_full_ea_info *)(
1060 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1061 rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
1062 le32_to_cpu(rsp->OutputBufferLength), ea_name);
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001063
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001064 qeas_exit:
1065 kfree(utf16_path);
1066 free_rsp_buf(buftype, rsp_iov.iov_base);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001067 return rc;
1068}
1069
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001070
1071static int
1072smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1073 const char *path, const char *ea_name, const void *ea_value,
1074 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1075 struct cifs_sb_info *cifs_sb)
1076{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001077 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001078 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001079 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001080 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001081 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001082 struct smb_rqst rqst[3];
1083 int resp_buftype[3];
1084 struct kvec rsp_iov[3];
1085 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1086 struct cifs_open_parms oparms;
1087 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1088 struct cifs_fid fid;
1089 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1090 unsigned int size[1];
1091 void *data[1];
1092 struct smb2_file_full_ea_info *ea = NULL;
1093 struct kvec close_iov[1];
1094 int rc;
1095
1096 if (smb3_encryption_required(tcon))
1097 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001098
1099 if (ea_name_len > 255)
1100 return -EINVAL;
1101
1102 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1103 if (!utf16_path)
1104 return -ENOMEM;
1105
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001106 memset(rqst, 0, sizeof(rqst));
1107 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1108 memset(rsp_iov, 0, sizeof(rsp_iov));
1109
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001110 if (ses->server->ops->query_all_EAs) {
1111 if (!ea_value) {
1112 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1113 ea_name, NULL, 0,
1114 cifs_sb);
1115 if (rc == -ENODATA)
1116 goto sea_exit;
1117 }
1118 }
1119
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001120 /* Open */
1121 memset(&open_iov, 0, sizeof(open_iov));
1122 rqst[0].rq_iov = open_iov;
1123 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1124
1125 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001126 oparms.tcon = tcon;
1127 oparms.desired_access = FILE_WRITE_EA;
1128 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001129 if (backup_cred(cifs_sb))
1130 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1131 else
1132 oparms.create_options = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001133 oparms.fid = &fid;
1134 oparms.reconnect = false;
1135
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001136 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
1137 if (rc)
1138 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001139 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001140
1141
1142 /* Set Info */
1143 memset(&si_iov, 0, sizeof(si_iov));
1144 rqst[1].rq_iov = si_iov;
1145 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001146
1147 len = sizeof(ea) + ea_name_len + ea_value_len + 1;
1148 ea = kzalloc(len, GFP_KERNEL);
1149 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001150 rc = -ENOMEM;
1151 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001152 }
1153
1154 ea->ea_name_length = ea_name_len;
1155 ea->ea_value_length = cpu_to_le16(ea_value_len);
1156 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1157 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1158
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001159 size[0] = len;
1160 data[0] = ea;
1161
1162 rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
1163 COMPOUND_FID, current->tgid,
1164 FILE_FULL_EA_INFORMATION,
1165 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001166 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001167 smb2_set_related(&rqst[1]);
1168
1169
1170 /* Close */
1171 memset(&close_iov, 0, sizeof(close_iov));
1172 rqst[2].rq_iov = close_iov;
1173 rqst[2].rq_nvec = 1;
1174 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
1175 smb2_set_related(&rqst[2]);
1176
1177 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1178 resp_buftype, rsp_iov);
1179
1180 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001181 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001182 kfree(utf16_path);
1183 SMB2_open_free(&rqst[0]);
1184 SMB2_set_info_free(&rqst[1]);
1185 SMB2_close_free(&rqst[2]);
1186 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1187 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1188 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001189 return rc;
1190}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001191#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001192
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001193static bool
1194smb2_can_echo(struct TCP_Server_Info *server)
1195{
1196 return server->echoes;
1197}
1198
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001199static void
1200smb2_clear_stats(struct cifs_tcon *tcon)
1201{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001202 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001203
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001204 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1205 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1206 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1207 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001208}
1209
1210static void
Steve French769ee6a2013-06-19 14:15:30 -05001211smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1212{
1213 seq_puts(m, "\n\tShare Capabilities:");
1214 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1215 seq_puts(m, " DFS,");
1216 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1217 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1218 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1219 seq_puts(m, " SCALEOUT,");
1220 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1221 seq_puts(m, " CLUSTER,");
1222 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1223 seq_puts(m, " ASYMMETRIC,");
1224 if (tcon->capabilities == 0)
1225 seq_puts(m, " None");
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001226 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1227 seq_puts(m, " Aligned,");
1228 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1229 seq_puts(m, " Partition Aligned,");
1230 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1231 seq_puts(m, " SSD,");
1232 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1233 seq_puts(m, " TRIM-support,");
1234
Steve French769ee6a2013-06-19 14:15:30 -05001235 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
Steve Frenche0386e42018-05-20 01:27:03 -05001236 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001237 if (tcon->perf_sector_size)
1238 seq_printf(m, "\tOptimal sector size: 0x%x",
1239 tcon->perf_sector_size);
Steve Frenche0386e42018-05-20 01:27:03 -05001240 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
Steve French769ee6a2013-06-19 14:15:30 -05001241}
1242
1243static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001244smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1245{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001246 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1247 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001248
1249 /*
1250 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1251 * totals (requests sent) since those SMBs are per-session not per tcon
1252 */
Steve French52ce1ac2018-07-31 01:46:47 -05001253 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1254 (long long)(tcon->bytes_read),
1255 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001256 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1257 atomic_read(&tcon->num_local_opens),
1258 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001259 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001260 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1261 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001262 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001263 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1264 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001265 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001266 atomic_read(&sent[SMB2_CREATE_HE]),
1267 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001268 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001269 atomic_read(&sent[SMB2_CLOSE_HE]),
1270 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001271 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001272 atomic_read(&sent[SMB2_FLUSH_HE]),
1273 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001274 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001275 atomic_read(&sent[SMB2_READ_HE]),
1276 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001277 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001278 atomic_read(&sent[SMB2_WRITE_HE]),
1279 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001280 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001281 atomic_read(&sent[SMB2_LOCK_HE]),
1282 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001283 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001284 atomic_read(&sent[SMB2_IOCTL_HE]),
1285 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001286 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001287 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1288 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001289 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001290 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1291 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001292 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001293 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1294 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001295 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001296 atomic_read(&sent[SMB2_SET_INFO_HE]),
1297 atomic_read(&failed[SMB2_SET_INFO_HE]));
1298 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1299 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1300 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001301}
1302
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001303static void
1304smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1305{
David Howells2b0143b2015-03-17 22:25:59 +00001306 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04001307 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1308
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001309 cfile->fid.persistent_fid = fid->persistent_fid;
1310 cfile->fid.volatile_fid = fid->volatile_fid;
Steve Frenchdfe33f92018-10-30 19:50:31 -05001311#ifdef CONFIG_CIFS_DEBUG2
1312 cfile->fid.mid = fid->mid;
1313#endif /* CIFS_DEBUG2 */
Pavel Shilovsky42873b02013-09-05 21:30:16 +04001314 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1315 &fid->purge_cache);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001316 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
Aurelien Aptel94f87372016-09-22 07:38:50 +02001317 memcpy(cfile->fid.create_guid, fid->create_guid, 16);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001318}
1319
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001320static void
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001321smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1322 struct cifs_fid *fid)
1323{
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001324 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001325}
1326
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001327static int
Steve French41c13582013-11-14 00:05:36 -06001328SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1329 u64 persistent_fid, u64 volatile_fid,
1330 struct copychunk_ioctl *pcchunk)
1331{
1332 int rc;
1333 unsigned int ret_data_len;
1334 struct resume_key_req *res_key;
1335
1336 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1337 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001338 NULL, 0 /* no input */, CIFSMaxBufSize,
Steve French41c13582013-11-14 00:05:36 -06001339 (char **)&res_key, &ret_data_len);
1340
1341 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001342 cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
Steve French41c13582013-11-14 00:05:36 -06001343 goto req_res_key_exit;
1344 }
1345 if (ret_data_len < sizeof(struct resume_key_req)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001346 cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
Steve French41c13582013-11-14 00:05:36 -06001347 rc = -EINVAL;
1348 goto req_res_key_exit;
1349 }
1350 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1351
1352req_res_key_exit:
1353 kfree(res_key);
1354 return rc;
1355}
1356
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001357static int
1358smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001359 struct cifs_tcon *tcon,
1360 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001361 unsigned long p)
1362{
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001363 struct cifs_ses *ses = tcon->ses;
1364 char __user *arg = (char __user *)p;
1365 struct smb_query_info qi;
1366 struct smb_query_info __user *pqi;
1367 int rc = 0;
1368 int flags = 0;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001369 struct smb2_query_info_rsp *qi_rsp = NULL;
1370 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001371 void *buffer = NULL;
1372 struct smb_rqst rqst[3];
1373 int resp_buftype[3];
1374 struct kvec rsp_iov[3];
1375 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1376 struct cifs_open_parms oparms;
1377 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1378 struct cifs_fid fid;
1379 struct kvec qi_iov[1];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001380 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001381 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001382 struct kvec close_iov[1];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001383 unsigned int size[2];
1384 void *data[2];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001385
1386 memset(rqst, 0, sizeof(rqst));
1387 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1388 memset(rsp_iov, 0, sizeof(rsp_iov));
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001389
1390 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
1391 return -EFAULT;
1392
1393 if (qi.output_buffer_length > 1024)
1394 return -EINVAL;
1395
1396 if (!ses || !(ses->server))
1397 return -EIO;
1398
1399 if (smb3_encryption_required(tcon))
1400 flags |= CIFS_TRANSFORM_REQ;
1401
1402 buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
1403 if (buffer == NULL)
1404 return -ENOMEM;
1405
1406 if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
1407 qi.output_buffer_length)) {
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001408 rc = -EFAULT;
1409 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001410 }
1411
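	/*
	 * Build a compound of up to three requests: open the path, then the
	 * query/ioctl/set-info variant selected by qi.flags, then close.
	 */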
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001412 /* Open */
1413 memset(&open_iov, 0, sizeof(open_iov));
1414 rqst[0].rq_iov = open_iov;
1415 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001416
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001417 memset(&oparms, 0, sizeof(oparms));
1418 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001419 oparms.disposition = FILE_OPEN;
1420 if (is_dir)
1421 oparms.create_options = CREATE_NOT_FILE;
1422 else
1423 oparms.create_options = CREATE_NOT_DIR;
1424 oparms.fid = &fid;
1425 oparms.reconnect = false;
1426
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001427 if (qi.flags & PASSTHRU_FSCTL) {
1428 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1429 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1430 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001431 break;
1432 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1433 oparms.desired_access = GENERIC_ALL;
1434 break;
1435 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1436 oparms.desired_access = GENERIC_READ;
1437 break;
1438 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1439 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001440 break;
1441 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001442 } else if (qi.flags & PASSTHRU_SET_INFO) {
1443 oparms.desired_access = GENERIC_WRITE;
1444 } else {
1445 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001446 }
1447
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001448 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
1449 if (rc)
1450 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001451 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001452
1453 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001454 if (qi.flags & PASSTHRU_FSCTL) {
1455 /* Can eventually relax perm check since server enforces too */
1456 if (!capable(CAP_SYS_ADMIN))
1457 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001458 else {
1459 memset(&io_iov, 0, sizeof(io_iov));
1460 rqst[1].rq_iov = io_iov;
1461 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1462
1463 rc = SMB2_ioctl_init(tcon, &rqst[1],
1464 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001465 qi.info_type, true, buffer,
1466 qi.output_buffer_length,
1467 CIFSMaxBufSize);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001468 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001469 } else if (qi.flags == PASSTHRU_SET_INFO) {
1470 /* Can eventually relax perm check since server enforces too */
1471 if (!capable(CAP_SYS_ADMIN))
1472 rc = -EPERM;
1473 else {
1474 memset(&si_iov, 0, sizeof(si_iov));
1475 rqst[1].rq_iov = si_iov;
1476 rqst[1].rq_nvec = 1;
1477
1478 size[0] = 8;
1479 data[0] = buffer;
1480
1481 rc = SMB2_set_info_init(tcon, &rqst[1],
1482 COMPOUND_FID, COMPOUND_FID,
1483 current->tgid,
1484 FILE_END_OF_FILE_INFORMATION,
1485 SMB2_O_INFO_FILE, 0, data, size);
1486 }
Steve French31ba4332019-03-13 02:40:07 -05001487 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1488 memset(&qi_iov, 0, sizeof(qi_iov));
1489 rqst[1].rq_iov = qi_iov;
1490 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001491
Steve French31ba4332019-03-13 02:40:07 -05001492 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
1493 COMPOUND_FID, qi.file_info_class,
1494 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001495 qi.input_buffer_length,
1496 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001497 } else { /* unknown flags */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001498 cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001499 rc = -EINVAL;
1500 }
1501
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001502 if (rc)
1503 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001504 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001505 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001506
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001507 /* Close */
1508 memset(&close_iov, 0, sizeof(close_iov));
1509 rqst[2].rq_iov = close_iov;
1510 rqst[2].rq_nvec = 1;
1511
1512 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001513 if (rc)
1514 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001515 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001516
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001517 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1518 resp_buftype, rsp_iov);
1519 if (rc)
1520 goto iqinf_exit;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001521 if (qi.flags & PASSTHRU_FSCTL) {
1522 pqi = (struct smb_query_info __user *)arg;
1523 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1524 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1525 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001526 if (qi.input_buffer_length > 0 &&
1527 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
1528 rc = -EFAULT;
1529 goto iqinf_exit;
1530 }
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001531 if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
1532 sizeof(qi.input_buffer_length))) {
1533 rc = -EFAULT;
1534 goto iqinf_exit;
1535 }
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001536 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1537 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
1538 qi.input_buffer_length)) {
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001539 rc = -EFAULT;
1540 goto iqinf_exit;
1541 }
1542 } else {
1543 pqi = (struct smb_query_info __user *)arg;
1544 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1545 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1546 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
1547 if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
1548 sizeof(qi.input_buffer_length))) {
1549 rc = -EFAULT;
1550 goto iqinf_exit;
1551 }
1552 if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
1553 rc = -EFAULT;
1554 goto iqinf_exit;
1555 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001556 }
1557
1558 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001559 kfree(buffer);
1560 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001561 if (qi.flags & PASSTHRU_FSCTL)
1562 SMB2_ioctl_free(&rqst[1]);
1563 else
1564 SMB2_query_info_free(&rqst[1]);
1565
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001566 SMB2_close_free(&rqst[2]);
1567 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1568 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1569 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001570 return rc;
1571}
1572
Sachin Prabhu620d8742017-02-10 16:03:51 +05301573static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001574smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001575 struct cifsFileInfo *srcfile,
1576 struct cifsFileInfo *trgtfile, u64 src_off,
1577 u64 len, u64 dest_off)
1578{
1579 int rc;
1580 unsigned int ret_data_len;
1581 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001582 struct copychunk_ioctl_rsp *retbuf = NULL;
1583 struct cifs_tcon *tcon;
1584 int chunks_copied = 0;
1585 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301586 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001587
1588 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1589
1590 if (pcchunk == NULL)
1591 return -ENOMEM;
1592
Christoph Probsta205d502019-05-08 21:36:25 +02001593 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001594 /* Request a key from the server to identify the source of the copy */
1595 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1596 srcfile->fid.persistent_fid,
1597 srcfile->fid.volatile_fid, pcchunk);
1598
1599 /* Note: request_res_key sets res_key null only if rc !=0 */
1600 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001601 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001602
1603 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001604 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001605 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001606 pcchunk->Reserved2 = 0;
1607
Steve French9bf0c9c2013-11-16 18:05:28 -06001608 tcon = tlink_tcon(trgtfile->tlink);
1609
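	/*
	 * Copy in pieces: each FSCTL_SRV_COPYCHUNK_WRITE request moves at most
	 * tcon->max_bytes_chunk bytes; the -EINVAL branch below picks up the
	 * server's advertised limits and retries with smaller sizes.
	 */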
1610 while (len > 0) {
1611 pcchunk->SourceOffset = cpu_to_le64(src_off);
1612 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1613 pcchunk->Length =
1614 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1615
1616 /* Request server copy to target from src identified by key */
1617 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001618 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001619 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001620 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1621 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001622 if (rc == 0) {
1623 if (ret_data_len !=
1624 sizeof(struct copychunk_ioctl_rsp)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001625 cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001626 rc = -EIO;
1627 goto cchunk_out;
1628 }
1629 if (retbuf->TotalBytesWritten == 0) {
1630 cifs_dbg(FYI, "no bytes copied\n");
1631 rc = -EIO;
1632 goto cchunk_out;
1633 }
1634 /*
1635 * Check if server claimed to write more than we asked
1636 */
1637 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1638 le32_to_cpu(pcchunk->Length)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001639 cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001640 rc = -EIO;
1641 goto cchunk_out;
1642 }
1643 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001644 cifs_tcon_dbg(VFS, "invalid num chunks written\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001645 rc = -EIO;
1646 goto cchunk_out;
1647 }
1648 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001649
Sachin Prabhu620d8742017-02-10 16:03:51 +05301650 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1651 src_off += bytes_written;
1652 dest_off += bytes_written;
1653 len -= bytes_written;
1654 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001655
Sachin Prabhu620d8742017-02-10 16:03:51 +05301656 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001657 le32_to_cpu(retbuf->ChunksWritten),
1658 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301659 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001660 } else if (rc == -EINVAL) {
1661 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1662 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001663
Steve French9bf0c9c2013-11-16 18:05:28 -06001664 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1665 le32_to_cpu(retbuf->ChunksWritten),
1666 le32_to_cpu(retbuf->ChunkBytesWritten),
1667 le32_to_cpu(retbuf->TotalBytesWritten));
1668
1669 /*
1670 * Check if this is the first request using these sizes,
1671 * (ie check if the copy succeeded once with the original sizes
1672 * and whether the server gave us different sizes after
1673 * we already updated max sizes on a previous request).
1674 * If not, why is the server returning an error now?
1675 */
1676 if ((chunks_copied != 0) || chunk_sizes_updated)
1677 goto cchunk_out;
1678
1679 /* Check that server is not asking us to grow size */
1680 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1681 tcon->max_bytes_chunk)
1682 tcon->max_bytes_chunk =
1683 le32_to_cpu(retbuf->ChunkBytesWritten);
1684 else
1685 goto cchunk_out; /* server gave us bogus size */
1686
1687 /* No need to change MaxChunks since already set to 1 */
1688 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001689 } else
1690 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001691 }
1692
1693cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001694 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001695 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301696 if (rc)
1697 return rc;
1698 else
1699 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001700}
1701
1702static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001703smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1704 struct cifs_fid *fid)
1705{
1706 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1707}
1708
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001709static unsigned int
1710smb2_read_data_offset(char *buf)
1711{
1712 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001713
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001714 return rsp->DataOffset;
1715}
1716
1717static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001718smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001719{
1720 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001721
1722 if (in_remaining)
1723 return le32_to_cpu(rsp->DataRemaining);
1724
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001725 return le32_to_cpu(rsp->DataLength);
1726}
1727
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001728
1729static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001730smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001731 struct cifs_io_parms *parms, unsigned int *bytes_read,
1732 char **buf, int *buf_type)
1733{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001734 parms->persistent_fid = pfid->persistent_fid;
1735 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001736 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1737}
1738
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001739static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001740smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001741 struct cifs_io_parms *parms, unsigned int *written,
1742 struct kvec *iov, unsigned long nr_segs)
1743{
1744
Steve Frenchdb8b6312014-09-22 05:13:55 -05001745 parms->persistent_fid = pfid->persistent_fid;
1746 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001747 return SMB2_write(xid, parms, written, iov, nr_segs);
1748}
1749
Steve Frenchd43cc792014-08-13 17:16:29 -05001750/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1751static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1752 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1753{
1754 struct cifsInodeInfo *cifsi;
1755 int rc;
1756
1757 cifsi = CIFS_I(inode);
1758
1759 /* if file already sparse don't bother setting sparse again */
1760 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1761 return true; /* already sparse */
1762
1763 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1764 return true; /* already not sparse */
1765
1766 /*
1767 * Can't check for sparse support on the share the usual way via the
1768 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1769 * since the Samba server doesn't set the flag on the share, yet
1770 * supports the set sparse FSCTL and returns sparse correctly
1771 * in the file attributes. If we fail setting sparse though we
1772 * mark that server does not support sparse files for this share
1773 * to avoid repeatedly sending the unsupported fsctl to server
1774 * if the file is repeatedly extended.
1775 */
1776 if (tcon->broken_sparse_sup)
1777 return false;
1778
1779 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1780 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001781 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001782 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001783 if (rc) {
1784 tcon->broken_sparse_sup = true;
1785 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1786 return false;
1787 }
1788
1789 if (setsparse)
1790 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1791 else
1792 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1793
1794 return true;
1795}
1796
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001797static int
1798smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1799 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1800{
1801 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001802 struct inode *inode;
1803
1804 /*
1805 * If extending file more than one page make sparse. Many Linux fs
1806 * make files sparse by default when extending via ftruncate
1807 */
David Howells2b0143b2015-03-17 22:25:59 +00001808 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001809
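	/* i.e. only mark sparse when growing more than 8192 bytes (two 4K pages) past the current EOF */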
1810 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001811 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001812
Steve Frenchd43cc792014-08-13 17:16:29 -05001813 /* whether set sparse succeeds or not, extend the file */
1814 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001815 }
1816
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001817 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001818 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001819}
1820
Steve French02b16662015-06-27 21:18:36 -07001821static int
1822smb2_duplicate_extents(const unsigned int xid,
1823 struct cifsFileInfo *srcfile,
1824 struct cifsFileInfo *trgtfile, u64 src_off,
1825 u64 len, u64 dest_off)
1826{
1827 int rc;
1828 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001829 struct duplicate_extents_to_file dup_ext_buf;
1830 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1831
1832 /* server fileays advertise duplicate extent support with this flag */
1833 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1834 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1835 return -EOPNOTSUPP;
1836
1837 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1838 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1839 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1840 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1841 dup_ext_buf.ByteCount = cpu_to_le64(len);
Christoph Probsta205d502019-05-08 21:36:25 +02001842 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
Steve French02b16662015-06-27 21:18:36 -07001843 src_off, dest_off, len);
1844
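	/* extend the target first so it is large enough to hold the duplicated extents */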
1845 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1846 if (rc)
1847 goto duplicate_extents_out;
1848
1849 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1850 trgtfile->fid.volatile_fid,
1851 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001852 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001853 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001854 sizeof(struct duplicate_extents_to_file),
Steve French153322f2019-03-28 22:32:49 -05001855 CIFSMaxBufSize, NULL,
Steve French02b16662015-06-27 21:18:36 -07001856 &ret_data_len);
1857
1858 if (ret_data_len > 0)
Christoph Probsta205d502019-05-08 21:36:25 +02001859 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
Steve French02b16662015-06-27 21:18:36 -07001860
1861duplicate_extents_out:
1862 return rc;
1863}
Steve French02b16662015-06-27 21:18:36 -07001864
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001865static int
Steve French64a5cfa2013-10-14 15:31:32 -05001866smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1867 struct cifsFileInfo *cfile)
1868{
1869 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1870 cfile->fid.volatile_fid);
1871}
1872
1873static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001874smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1875 struct cifsFileInfo *cfile)
1876{
1877 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001878 unsigned int ret_data_len;
1879
1880 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1881 integr_info.Flags = 0;
1882 integr_info.Reserved = 0;
1883
1884 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1885 cfile->fid.volatile_fid,
1886 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001887 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001888 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001889 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001890 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001891 &ret_data_len);
1892
1893}
1894
Steve Frenche02789a2018-08-09 14:33:12 -05001895/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1896#define GMT_TOKEN_SIZE 50
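/* i.e. 24 UTF-16 characters (48 bytes) plus a two-byte UTF-16 null terminator */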
1897
Steve French153322f2019-03-28 22:32:49 -05001898#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1899
Steve Frenche02789a2018-08-09 14:33:12 -05001900/*
1901 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1902 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1903 */
Steve Frenchb3152e22015-06-24 03:17:02 -05001904static int
Steve French834170c2016-09-30 21:14:26 -05001905smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1906 struct cifsFileInfo *cfile, void __user *ioc_buf)
1907{
1908 char *retbuf = NULL;
1909 unsigned int ret_data_len = 0;
1910 int rc;
Steve French153322f2019-03-28 22:32:49 -05001911 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05001912 struct smb_snapshot_array snapshot_in;
1913
Steve French973189a2019-04-04 00:41:04 -05001914 /*
1915 * On the first query to enumerate the list of snapshots available
1916 * for this volume the buffer begins with 0 (number of snapshots
1917 * which can be returned is zero since at that point we do not know
1918 * how big the buffer needs to be). On the second query,
1919 * it (ret_data_len) is set to number of snapshots so we can
1920 * know to set the maximum response size larger (see below).
1921 */
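	/*
	 * Peek at the count the caller passed in (first field of the user
	 * buffer): zero on the initial query, the number of snapshots on the
	 * retry.
	 */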
Steve French153322f2019-03-28 22:32:49 -05001922 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
1923 return -EFAULT;
1924
1925 /*
1926 * Note that for snapshot queries, servers like Azure expect the
1927 * first query to be minimal size (just used to get the number/size
1928 * of previous versions) so response size must be specified as EXACTLY
1929 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
1930 * of eight bytes.
1931 */
1932 if (ret_data_len == 0)
1933 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
1934 else
1935 max_response_size = CIFSMaxBufSize;
1936
Steve French834170c2016-09-30 21:14:26 -05001937 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1938 cfile->fid.volatile_fid,
1939 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001940 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001941 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05001942 (char **)&retbuf,
1943 &ret_data_len);
1944 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
1945 rc, ret_data_len);
1946 if (rc)
1947 return rc;
1948
1949 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
1950 /* Fixup buffer */
1951 if (copy_from_user(&snapshot_in, ioc_buf,
1952 sizeof(struct smb_snapshot_array))) {
1953 rc = -EFAULT;
1954 kfree(retbuf);
1955 return rc;
1956 }
Steve French834170c2016-09-30 21:14:26 -05001957
Steve Frenche02789a2018-08-09 14:33:12 -05001958 /*
1959 * Check for min size, ie not large enough to fit even one GMT
1960 * token (snapshot). On the first ioctl some users may pass in
1961 * smaller size (or zero) to simply get the size of the array
1962 * so the user space caller can allocate sufficient memory
1963 * and retry the ioctl again with larger array size sufficient
1964 * to hold all of the snapshot GMT tokens on the second try.
1965 */
1966 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
1967 ret_data_len = sizeof(struct smb_snapshot_array);
1968
1969 /*
1970 * We return struct SRV_SNAPSHOT_ARRAY, followed by
1971 * the snapshot array (of 50 byte GMT tokens) each
1972 * representing an available previous version of the data
1973 */
1974 if (ret_data_len > (snapshot_in.snapshot_array_size +
1975 sizeof(struct smb_snapshot_array)))
1976 ret_data_len = snapshot_in.snapshot_array_size +
1977 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05001978
1979 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
1980 rc = -EFAULT;
1981 }
1982
1983 kfree(retbuf);
1984 return rc;
1985}
1986
1987static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001988smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1989 const char *path, struct cifs_sb_info *cifs_sb,
1990 struct cifs_fid *fid, __u16 search_flags,
1991 struct cifs_search_info *srch_inf)
1992{
1993 __le16 *utf16_path;
1994 int rc;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001995 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001996 struct cifs_open_parms oparms;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001997
1998 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1999 if (!utf16_path)
2000 return -ENOMEM;
2001
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002002 oparms.tcon = tcon;
2003 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2004 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05002005 if (backup_cred(cifs_sb))
2006 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2007 else
2008 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002009 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002010 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002011
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002012 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002013 kfree(utf16_path);
2014 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07002015 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002016 return rc;
2017 }
2018
2019 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02002020 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002021
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002022 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
2023 fid->volatile_fid, 0, srch_inf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002024 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07002025 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002026 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002027 }
2028 return rc;
2029}
2030
2031static int
2032smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2033 struct cifs_fid *fid, __u16 search_flags,
2034 struct cifs_search_info *srch_inf)
2035{
2036 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2037 fid->volatile_fid, 0, srch_inf);
2038}
2039
2040static int
2041smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2042 struct cifs_fid *fid)
2043{
2044 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2045}
2046
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002047/*
Christoph Probsta205d502019-05-08 21:36:25 +02002048 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2049 * the number of credits and return true. Otherwise - return false.
2050 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002051static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002052smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002053{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002054 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002055
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002056 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002057 return false;
2058
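	/* interim responses can still grant credits; add them back and wake any waiters */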
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002059 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002060 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002061 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002062 spin_unlock(&server->req_lock);
2063 wake_up(&server->request_q);
2064 }
2065
2066 return true;
2067}
2068
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002069static bool
2070smb2_is_session_expired(char *buf)
2071{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002072 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002073
Mark Symsd81243c2018-05-24 09:47:31 +01002074 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2075 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002076 return false;
2077
Steve Frenche68a9322018-07-30 14:23:58 -05002078 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2079 le16_to_cpu(shdr->Command),
2080 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002081 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002082
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002083 return true;
2084}
2085
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002086static int
2087smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2088 struct cifsInodeInfo *cinode)
2089{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002090 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2091 return SMB2_lease_break(0, tcon, cinode->lease_key,
2092 smb2_get_lease_state(cinode));
2093
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002094 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2095 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002096 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002097}
2098
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002099void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002100smb2_set_related(struct smb_rqst *rqst)
2101{
2102 struct smb2_sync_hdr *shdr;
2103
2104 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002105 if (shdr == NULL) {
2106 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2107 return;
2108 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002109 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2110}
2111
2112char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2113
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002114void
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002115smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002116{
2117 struct smb2_sync_hdr *shdr;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002118 struct cifs_ses *ses = tcon->ses;
2119 struct TCP_Server_Info *server = ses->server;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002120 unsigned long len = smb_rqst_len(server, rqst);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002121 int i, num_padding;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002122
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002123 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
2124 if (shdr == NULL) {
2125 cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
2126 return;
2127 }
2128
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002129 /* SMB headers in a compound are 8 byte aligned. */
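	/* e.g. a 132 byte request has (len & 7) == 4, so 4 padding bytes push the next header to offset 136 */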
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002130
2131 /* No padding needed */
2132 if (!(len & 7))
2133 goto finished;
2134
2135 num_padding = 8 - (len & 7);
2136 if (!smb3_encryption_required(tcon)) {
2137 /*
2138 * If we do not have encryption then we can just add an extra
2139 * iov for the padding.
2140 */
2141 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2142 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2143 rqst->rq_nvec++;
2144 len += num_padding;
2145 } else {
2146 /*
2147 * We can not add a small padding iov for the encryption case
2148 * because the encryption framework can not handle the padding
2149 * iovs.
2150 * We have to flatten this into a single buffer and add
2151 * the padding to it.
2152 */
2153 for (i = 1; i < rqst->rq_nvec; i++) {
2154 memcpy(rqst->rq_iov[0].iov_base +
2155 rqst->rq_iov[0].iov_len,
2156 rqst->rq_iov[i].iov_base,
2157 rqst->rq_iov[i].iov_len);
2158 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06002159 }
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002160 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2161 0, num_padding);
2162 rqst->rq_iov[0].iov_len += num_padding;
2163 len += num_padding;
2164 rqst->rq_nvec = 1;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002165 }
2166
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002167 finished:
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002168 shdr->NextCommand = cpu_to_le32(len);
2169}
2170
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002171/*
2172 * Passes the query info response back to the caller on success.
2173 * Caller needs to free this with free_rsp_buf().
2174 */
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002175int
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002176smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2177 __le16 *utf16_path, u32 desired_access,
2178 u32 class, u32 type, u32 output_len,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002179 struct kvec *rsp, int *buftype,
2180 struct cifs_sb_info *cifs_sb)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002181{
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002182 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002183 int flags = 0;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002184 struct smb_rqst rqst[3];
2185 int resp_buftype[3];
2186 struct kvec rsp_iov[3];
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002187 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002188 struct kvec qi_iov[1];
2189 struct kvec close_iov[1];
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002190 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002191 struct cifs_open_parms oparms;
2192 struct cifs_fid fid;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002193 int rc;
2194
2195 if (smb3_encryption_required(tcon))
2196 flags |= CIFS_TRANSFORM_REQ;
2197
2198 memset(rqst, 0, sizeof(rqst));
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002199 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002200 memset(rsp_iov, 0, sizeof(rsp_iov));
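	/* compound: CREATE + QUERY_INFO + CLOSE sent in a single round trip */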
2201
2202 memset(&open_iov, 0, sizeof(open_iov));
2203 rqst[0].rq_iov = open_iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002204 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002205
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002206 oparms.tcon = tcon;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002207 oparms.desired_access = desired_access;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002208 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002209 if (cifs_sb && backup_cred(cifs_sb))
2210 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2211 else
2212 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002213 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002214 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002215
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002216 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002217 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002218 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002219 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002220
2221 memset(&qi_iov, 0, sizeof(qi_iov));
2222 rqst[1].rq_iov = qi_iov;
2223 rqst[1].rq_nvec = 1;
2224
2225 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002226 class, type, 0,
2227 output_len, 0,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05002228 NULL);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002229 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002230 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002231 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002232 smb2_set_related(&rqst[1]);
2233
2234 memset(&close_iov, 0, sizeof(close_iov));
2235 rqst[2].rq_iov = close_iov;
2236 rqst[2].rq_nvec = 1;
2237
2238 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2239 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002240 goto qic_exit;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002241 smb2_set_related(&rqst[2]);
2242
2243 rc = compound_send_recv(xid, ses, flags, 3, rqst,
2244 resp_buftype, rsp_iov);
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002245 if (rc) {
2246 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Steve French7dcc82c2019-09-11 00:07:36 -05002247 if (rc == -EREMCHG) {
2248 tcon->need_reconnect = true;
2249 printk_once(KERN_WARNING "server share %s deleted\n",
2250 tcon->treeName);
2251 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002252 goto qic_exit;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002253 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002254 *rsp = rsp_iov[1];
2255 *buftype = resp_buftype[1];
2256
2257 qic_exit:
2258 SMB2_open_free(&rqst[0]);
2259 SMB2_query_info_free(&rqst[1]);
2260 SMB2_close_free(&rqst[2]);
2261 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2262 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2263 return rc;
2264}
2265
2266static int
2267smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2268 struct kstatfs *buf)
2269{
2270 struct smb2_query_info_rsp *rsp;
2271 struct smb2_fs_full_size_info *info = NULL;
2272 __le16 utf16_path = 0; /* Null - open root of share */
2273 struct kvec rsp_iov = {NULL, 0};
2274 int buftype = CIFS_NO_BUFFER;
2275 int rc;
2276
2277
2278 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2279 FILE_READ_ATTRIBUTES,
2280 FS_FULL_SIZE_INFORMATION,
2281 SMB2_O_INFO_FILESYSTEM,
2282 sizeof(struct smb2_fs_full_size_info),
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002283 &rsp_iov, &buftype, NULL);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002284 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002285 goto qfs_exit;
2286
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002287 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002288 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002289 info = (struct smb2_fs_full_size_info *)(
2290 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2291 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2292 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002293 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002294 sizeof(struct smb2_fs_full_size_info));
2295 if (!rc)
2296 smb2_copy_fs_info_to_kstatfs(info, buf);
2297
2298qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002299 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002300 return rc;
2301}
2302
Steve French2d304212018-06-24 23:28:12 -05002303static int
2304smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2305 struct kstatfs *buf)
2306{
2307 int rc;
2308 __le16 srch_path = 0; /* Null - open root of share */
2309 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2310 struct cifs_open_parms oparms;
2311 struct cifs_fid fid;
2312
2313 if (!tcon->posix_extensions)
2314 return smb2_queryfs(xid, tcon, buf);
2315
2316 oparms.tcon = tcon;
2317 oparms.desired_access = FILE_READ_ATTRIBUTES;
2318 oparms.disposition = FILE_OPEN;
2319 oparms.create_options = 0;
2320 oparms.fid = &fid;
2321 oparms.reconnect = false;
2322
2323 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2324 if (rc)
2325 return rc;
2326
2327 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2328 fid.volatile_fid, buf);
2329 buf->f_type = SMB2_MAGIC_NUMBER;
2330 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2331 return rc;
2332}
Steve French2d304212018-06-24 23:28:12 -05002333
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002334static bool
2335smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2336{
2337 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2338 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2339}
2340
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002341static int
2342smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2343 __u64 length, __u32 type, int lock, int unlock, bool wait)
2344{
2345 if (unlock && !lock)
2346 type = SMB2_LOCKFLAG_UNLOCK;
2347 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2348 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2349 current->tgid, length, offset, type, wait);
2350}
2351
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002352static void
2353smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2354{
2355 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2356}
2357
2358static void
2359smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2360{
2361 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2362}
2363
2364static void
2365smb2_new_lease_key(struct cifs_fid *fid)
2366{
Steve Frenchfa70b872016-09-22 00:39:34 -05002367 generate_random_uuid(fid->lease_key);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002368}
2369
Aurelien Aptel9d496402017-02-13 16:16:49 +01002370static int
2371smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2372 const char *search_name,
2373 struct dfs_info3_param **target_nodes,
2374 unsigned int *num_of_nodes,
2375 const struct nls_table *nls_codepage, int remap)
2376{
2377 int rc;
2378 __le16 *utf16_path = NULL;
2379 int utf16_path_len = 0;
2380 struct cifs_tcon *tcon;
2381 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2382 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2383 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2384
Christoph Probsta205d502019-05-08 21:36:25 +02002385 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002386
2387 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002388 * Try to use the IPC tcon, otherwise just use any
Aurelien Aptel9d496402017-02-13 16:16:49 +01002389 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002390 tcon = ses->tcon_ipc;
2391 if (tcon == NULL) {
2392 spin_lock(&cifs_tcp_ses_lock);
2393 tcon = list_first_entry_or_null(&ses->tcon_list,
2394 struct cifs_tcon,
2395 tcon_list);
2396 if (tcon)
2397 tcon->tc_count++;
2398 spin_unlock(&cifs_tcp_ses_lock);
2399 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002400
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002401 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002402 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2403 ses);
2404 rc = -ENOTCONN;
2405 goto out;
2406 }
2407
2408 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2409 &utf16_path_len,
2410 nls_codepage, remap);
2411 if (!utf16_path) {
2412 rc = -ENOMEM;
2413 goto out;
2414 }
2415
2416 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2417 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2418 if (!dfs_req) {
2419 rc = -ENOMEM;
2420 goto out;
2421 }
2422
2423 /* Highest DFS referral version understood */
2424 dfs_req->MaxReferralLevel = DFS_VERSION;
2425
2426 /* Path to resolve in an UTF-16 null-terminated string */
2427 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2428
2429 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002430 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2431 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002432 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002433 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002434 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002435 } while (rc == -EAGAIN);
2436
2437 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002438 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002439 cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002440 goto out;
2441 }
2442
2443 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2444 num_of_nodes, target_nodes,
2445 nls_codepage, remap, search_name,
2446 true /* is_unicode */);
2447 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002448 cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002449 goto out;
2450 }
2451
2452 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002453 if (tcon && !tcon->ipc) {
2454 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002455 spin_lock(&cifs_tcp_ses_lock);
2456 tcon->tc_count--;
2457 spin_unlock(&cifs_tcp_ses_lock);
2458 }
2459 kfree(utf16_path);
2460 kfree(dfs_req);
2461 kfree(dfs_rsp);
2462 return rc;
2463}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002464
2465static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002466parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2467 u32 plen, char **target_path,
2468 struct cifs_sb_info *cifs_sb)
2469{
2470 unsigned int len;
2471
2472 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2473 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2474
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002475 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2476 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2477 le64_to_cpu(symlink_buf->InodeType));
2478 return -EOPNOTSUPP;
2479 }
2480
2481 *target_path = cifs_strndup_from_utf16(
2482 symlink_buf->PathBuffer,
2483 len, true, cifs_sb->local_nls);
2484 if (!(*target_path))
2485 return -ENOMEM;
2486
2487 convert_delimiter(*target_path, '/');
2488 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2489
2490 return 0;
2491}
2492
2493static int
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002494parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
2495 u32 plen, char **target_path,
2496 struct cifs_sb_info *cifs_sb)
2497{
2498 unsigned int sub_len;
2499 unsigned int sub_offset;
2500
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002501 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002502
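	/*
	 * Per MS-FSCC 2.1.2.4 the fixed part of the symlink reparse buffer
	 * (through the Flags field) is 20 bytes, and the name offsets below
	 * are relative to PathBuffer, hence the +20 in the bounds checks.
	 */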
2503 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
2504 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
2505 if (sub_offset + 20 > plen ||
2506 sub_offset + sub_len + 20 > plen) {
2507 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
2508 return -EIO;
2509 }
2510
2511 *target_path = cifs_strndup_from_utf16(
2512 symlink_buf->PathBuffer + sub_offset,
2513 sub_len, true, cifs_sb->local_nls);
2514 if (!(*target_path))
2515 return -ENOMEM;
2516
2517 convert_delimiter(*target_path, '/');
2518 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2519
2520 return 0;
2521}
2522
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002523static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002524parse_reparse_point(struct reparse_data_buffer *buf,
2525 u32 plen, char **target_path,
2526 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002527{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002528 if (plen < sizeof(struct reparse_data_buffer)) {
2529 cifs_dbg(VFS, "reparse buffer is too small. Must be "
2530 "at least 8 bytes but was %d\n", plen);
2531 return -EIO;
2532 }
2533
2534 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2535 sizeof(struct reparse_data_buffer)) {
2536 cifs_dbg(VFS, "srv returned invalid reparse buf "
2537 "length: %d\n", plen);
2538 return -EIO;
2539 }
2540
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002541 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002542 switch (le32_to_cpu(buf->ReparseTag)) {
2543 case IO_REPARSE_TAG_NFS:
2544 return parse_reparse_posix(
2545 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002546 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002547 case IO_REPARSE_TAG_SYMLINK:
2548 return parse_reparse_symlink(
2549 (struct reparse_symlink_data_buffer *)buf,
2550 plen, target_path, cifs_sb);
2551 default:
2552 cifs_dbg(VFS, "srv returned unknown symlink buffer "
2553 "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
2554 return -EOPNOTSUPP;
2555 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002556}
2557
Pavel Shilovsky78932422016-07-24 10:37:38 +03002558#define SMB2_SYMLINK_STRUCT_SIZE \
2559 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
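/* the -1 drops the one-byte ErrorData[] placeholder at the end of smb2_err_rsp so the symlink payload is not counted twice */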
2560
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002561static int
2562smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002563 struct cifs_sb_info *cifs_sb, const char *full_path,
2564 char **target_path, bool is_reparse_point)
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002565{
2566 int rc;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002567 __le16 *utf16_path = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002568 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2569 struct cifs_open_parms oparms;
2570 struct cifs_fid fid;
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002571 struct kvec err_iov = {NULL, 0};
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002572 struct smb2_err_rsp *err_buf = NULL;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002573 struct smb2_symlink_err_rsp *symlink;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002574 unsigned int sub_len;
2575 unsigned int sub_offset;
2576 unsigned int print_len;
2577 unsigned int print_offset;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002578 int flags = 0;
2579 struct smb_rqst rqst[3];
2580 int resp_buftype[3];
2581 struct kvec rsp_iov[3];
2582 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2583 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
2584 struct kvec close_iov[1];
2585 struct smb2_create_rsp *create_rsp;
2586 struct smb2_ioctl_rsp *ioctl_rsp;
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002587 struct reparse_data_buffer *reparse_buf;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002588 u32 plen;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002589
2590 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2591
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002592 *target_path = NULL;
2593
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002594 if (smb3_encryption_required(tcon))
2595 flags |= CIFS_TRANSFORM_REQ;
2596
2597 memset(rqst, 0, sizeof(rqst));
2598 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2599 memset(rsp_iov, 0, sizeof(rsp_iov));
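	/*
	 * Compound an open, a FSCTL_GET_REPARSE_POINT ioctl and a close. If
	 * the open instead fails with a symlink error, the target is parsed
	 * out of the error response further below.
	 */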
2600
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002601 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2602 if (!utf16_path)
2603 return -ENOMEM;
2604
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002605 /* Open */
2606 memset(&open_iov, 0, sizeof(open_iov));
2607 rqst[0].rq_iov = open_iov;
2608 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2609
2610 memset(&oparms, 0, sizeof(oparms));
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002611 oparms.tcon = tcon;
2612 oparms.desired_access = FILE_READ_ATTRIBUTES;
2613 oparms.disposition = FILE_OPEN;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002614
Steve French5e196972018-08-27 17:04:13 -05002615 if (backup_cred(cifs_sb))
2616 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2617 else
2618 oparms.create_options = 0;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002619 if (is_reparse_point)
2620 oparms.create_options = OPEN_REPARSE_POINT;
2621
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002622 oparms.fid = &fid;
2623 oparms.reconnect = false;
2624
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002625 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
2626 if (rc)
2627 goto querty_exit;
2628 smb2_set_next_command(tcon, &rqst[0]);
2629
2630
2631 /* IOCTL */
2632 memset(&io_iov, 0, sizeof(io_iov));
2633 rqst[1].rq_iov = io_iov;
2634 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
2635
2636 rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
2637 fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
2638 true /* is_fsctl */, NULL, 0, CIFSMaxBufSize);
2639 if (rc)
2640 goto querty_exit;
2641
2642 smb2_set_next_command(tcon, &rqst[1]);
2643 smb2_set_related(&rqst[1]);
2644
2645
2646 /* Close */
2647 memset(&close_iov, 0, sizeof(close_iov));
2648 rqst[2].rq_iov = close_iov;
2649 rqst[2].rq_nvec = 1;
2650
2651 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
2652 if (rc)
2653 goto querty_exit;
2654
2655 smb2_set_related(&rqst[2]);
2656
2657 rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
2658 resp_buftype, rsp_iov);
2659
2660 create_rsp = rsp_iov[0].iov_base;
2661 if (create_rsp && create_rsp->sync_hdr.Status)
2662 err_iov = rsp_iov[0];
2663 ioctl_rsp = rsp_iov[1].iov_base;
2664
2665 /*
2666 * Open was successful and we got an ioctl response.
2667 */
2668 if ((rc == 0) && (is_reparse_point)) {
2669 /* See MS-FSCC 2.3.23 */
2670
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002671 reparse_buf = (struct reparse_data_buffer *)
2672 ((char *)ioctl_rsp +
2673 le32_to_cpu(ioctl_rsp->OutputOffset));
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002674 plen = le32_to_cpu(ioctl_rsp->OutputCount);
2675
2676 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
2677 rsp_iov[1].iov_len) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002678 cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002679 plen);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002680 rc = -EIO;
2681 goto querty_exit;
2682 }
2683
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002684 rc = parse_reparse_point(reparse_buf, plen, target_path,
2685 cifs_sb);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002686 goto querty_exit;
2687 }
2688
Gustavo A. R. Silva0d568cd2018-04-13 10:13:29 -05002689 if (!rc || !err_iov.iov_base) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002690 rc = -ENOENT;
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002691 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002692 }
Pavel Shilovsky78932422016-07-24 10:37:38 +03002693
Ronnie Sahlberg91cb74f2018-04-13 09:03:19 +10002694 err_buf = err_iov.iov_base;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002695 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002696 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002697 rc = -EINVAL;
2698 goto querty_exit;
2699 }
2700
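	/*
	 * The open failed with a symlink error response: when the path is a
	 * symlink and the reparse point itself was not opened, the server
	 * returns the substitute and print names of the link target in the
	 * error data, which is parsed below.
	 */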
2701 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
2702 if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
2703 le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
2704 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002705 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002706 }
2707
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002708 /* open must fail on symlink - reset rc */
2709 rc = 0;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002710 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
2711 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
Pavel Shilovsky78932422016-07-24 10:37:38 +03002712 print_len = le16_to_cpu(symlink->PrintNameLength);
2713 print_offset = le16_to_cpu(symlink->PrintNameOffset);
2714
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002715 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002716 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002717 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002718 }
2719
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10002720 if (err_iov.iov_len <
2721 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
Ronnie Sahlbergdf070af2019-07-09 18:41:11 +10002722 rc = -EINVAL;
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002723 goto querty_exit;
Pavel Shilovsky78932422016-07-24 10:37:38 +03002724 }
2725
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002726 *target_path = cifs_strndup_from_utf16(
2727 (char *)symlink->PathBuffer + sub_offset,
2728 sub_len, true, cifs_sb->local_nls);
2729 if (!(*target_path)) {
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002730 rc = -ENOMEM;
2731 goto querty_exit;
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002732 }
2733 convert_delimiter(*target_path, '/');
2734 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002735
2736 querty_exit:
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002737 cifs_dbg(FYI, "query symlink rc %d\n", rc);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002738 kfree(utf16_path);
Ronnie Sahlbergebaf5462019-04-10 08:44:46 +10002739 SMB2_open_free(&rqst[0]);
2740 SMB2_ioctl_free(&rqst[1]);
2741 SMB2_close_free(&rqst[2]);
2742 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2743 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2744 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04002745 return rc;
2746}
2747
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002748static struct cifs_ntsd *
2749get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2750 const struct cifs_fid *cifsfid, u32 *pacllen)
2751{
2752 struct cifs_ntsd *pntsd = NULL;
2753 unsigned int xid;
2754 int rc = -EOPNOTSUPP;
2755 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2756
2757 if (IS_ERR(tlink))
2758 return ERR_CAST(tlink);
2759
2760 xid = get_xid();
2761 cifs_dbg(FYI, "trying to get acl\n");
2762
2763 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2764 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2765 free_xid(xid);
2766
2767 cifs_put_tlink(tlink);
2768
2769 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2770 if (rc)
2771 return ERR_PTR(rc);
2772 return pntsd;
2773
2774}
2775
2776static struct cifs_ntsd *
2777get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2778 const char *path, u32 *pacllen)
2779{
2780 struct cifs_ntsd *pntsd = NULL;
2781 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2782 unsigned int xid;
2783 int rc;
2784 struct cifs_tcon *tcon;
2785 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2786 struct cifs_fid fid;
2787 struct cifs_open_parms oparms;
2788 __le16 *utf16_path;
2789
2790 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2791 if (IS_ERR(tlink))
2792 return ERR_CAST(tlink);
2793
2794 tcon = tlink_tcon(tlink);
2795 xid = get_xid();
2796
2797 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002798 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002799 else
2800 oparms.create_options = 0;
2801
2802 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002803 if (!utf16_path) {
2804 rc = -ENOMEM;
2805 free_xid(xid);
2806 return ERR_PTR(rc);
2807 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002808
2809 oparms.tcon = tcon;
2810 oparms.desired_access = READ_CONTROL;
2811 oparms.disposition = FILE_OPEN;
2812 oparms.fid = &fid;
2813 oparms.reconnect = false;
2814
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002815 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002816 kfree(utf16_path);
2817 if (!rc) {
2818 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2819 fid.volatile_fid, (void **)&pntsd, pacllen);
2820 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2821 }
2822
2823 cifs_put_tlink(tlink);
2824 free_xid(xid);
2825
2826 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2827 if (rc)
2828 return ERR_PTR(rc);
2829 return pntsd;
2830}
2831
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002832static int
2833set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2834 struct inode *inode, const char *path, int aclflag)
2835{
2836 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2837 unsigned int xid;
2838 int rc, access_flags = 0;
2839 struct cifs_tcon *tcon;
2840 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2841 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2842 struct cifs_fid fid;
2843 struct cifs_open_parms oparms;
2844 __le16 *utf16_path;
2845
2846 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2847 if (IS_ERR(tlink))
2848 return PTR_ERR(tlink);
2849
2850 tcon = tlink_tcon(tlink);
2851 xid = get_xid();
2852
2853 if (backup_cred(cifs_sb))
2854 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2855 else
2856 oparms.create_options = 0;
2857
2858 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2859 access_flags = WRITE_OWNER;
2860 else
2861 access_flags = WRITE_DAC;
2862
2863 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002864 if (!utf16_path) {
2865 rc = -ENOMEM;
2866 free_xid(xid);
2867 return rc;
2868 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002869
2870 oparms.tcon = tcon;
2871 oparms.desired_access = access_flags;
2872 oparms.disposition = FILE_OPEN;
2873 oparms.path = path;
2874 oparms.fid = &fid;
2875 oparms.reconnect = false;
2876
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002877 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002878 kfree(utf16_path);
2879 if (!rc) {
2880 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2881 fid.volatile_fid, pnntsd, acllen, aclflag);
2882 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2883 }
2884
2885 cifs_put_tlink(tlink);
2886 free_xid(xid);
2887 return rc;
2888}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002889
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002890/* Retrieve an ACL from the server */
2891static struct cifs_ntsd *
2892get_smb2_acl(struct cifs_sb_info *cifs_sb,
2893 struct inode *inode, const char *path,
2894 u32 *pacllen)
2895{
2896 struct cifs_ntsd *pntsd = NULL;
2897 struct cifsFileInfo *open_file = NULL;
2898
2899 if (inode)
2900 open_file = find_readable_file(CIFS_I(inode), true);
2901 if (!open_file)
2902 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2903
2904 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2905 cifsFileInfo_put(open_file);
2906 return pntsd;
2907}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002908
Steve French30175622014-08-17 18:16:40 -05002909static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2910 loff_t offset, loff_t len, bool keep_size)
2911{
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002912 struct cifs_ses *ses = tcon->ses;
Steve French30175622014-08-17 18:16:40 -05002913 struct inode *inode;
2914 struct cifsInodeInfo *cifsi;
2915 struct cifsFileInfo *cfile = file->private_data;
2916 struct file_zero_data_information fsctl_buf;
2917 long rc;
2918 unsigned int xid;
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002919 __le64 eof;
Steve French30175622014-08-17 18:16:40 -05002920
2921 xid = get_xid();
2922
David Howells2b0143b2015-03-17 22:25:59 +00002923 inode = d_inode(cfile->dentry);
Steve French30175622014-08-17 18:16:40 -05002924 cifsi = CIFS_I(inode);
2925
Christoph Probsta205d502019-05-08 21:36:25 +02002926 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
Steve French779ede02019-03-13 01:41:49 -05002927 ses->Suid, offset, len);
2928
Steve French30175622014-08-17 18:16:40 -05002930	/* if the file is not oplocked, we can't be sure whether we are being asked to extend the size */
2931 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05002932 if (keep_size == false) {
2933 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05002934 trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
2935 tcon->tid, ses->Suid, offset, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05002936 free_xid(xid);
2937 return rc;
2938 }
Steve French30175622014-08-17 18:16:40 -05002939
Steve Frenchd1c35af2019-05-09 00:09:37 -05002940 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French30175622014-08-17 18:16:40 -05002941
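	/*
	 * FSCTL_SET_ZERO_DATA zeroes (and, on a sparse file, deallocates) the
	 * range [FileOffset, BeyondFinalZero). It never moves the end of file,
	 * so when the caller did not ask to keep the size we extend EOF
	 * ourselves below.
	 */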
2942 fsctl_buf.FileOffset = cpu_to_le64(offset);
2943 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2944
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002945 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2946 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
2947 (char *)&fsctl_buf,
2948 sizeof(struct file_zero_data_information),
2949 0, NULL, NULL);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002950 if (rc)
2951 goto zero_range_exit;
2952
2953 /*
2954 * do we also need to change the size of the file?
2955 */
2956 if (keep_size == false && i_size_read(inode) < offset + len) {
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002957 eof = cpu_to_le64(offset + len);
Ronnie Sahlbergc4250142019-05-02 15:52:57 +10002958 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2959 cfile->fid.volatile_fid, cfile->pid, &eof);
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002960 }
2961
Ronnie Sahlberg72c419d2019-03-13 14:37:49 +10002962 zero_range_exit:
Steve French30175622014-08-17 18:16:40 -05002963 free_xid(xid);
Steve French779ede02019-03-13 01:41:49 -05002964 if (rc)
2965 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
2966 ses->Suid, offset, len, rc);
2967 else
2968 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
2969 ses->Suid, offset, len);
Steve French30175622014-08-17 18:16:40 -05002970 return rc;
2971}
2972
Steve French31742c52014-08-17 08:38:47 -05002973static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2974 loff_t offset, loff_t len)
2975{
2976 struct inode *inode;
Steve French31742c52014-08-17 08:38:47 -05002977 struct cifsFileInfo *cfile = file->private_data;
2978 struct file_zero_data_information fsctl_buf;
2979 long rc;
2980 unsigned int xid;
2981 __u8 set_sparse = 1;
2982
2983 xid = get_xid();
2984
David Howells2b0143b2015-03-17 22:25:59 +00002985 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05002986
2987 /* Need to make file sparse, if not already, before freeing range. */
2988 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05002989 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2990 rc = -EOPNOTSUPP;
2991 free_xid(xid);
2992 return rc;
2993 }
Steve French31742c52014-08-17 08:38:47 -05002994
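	/*
	 * With the sparse attribute set, FSCTL_SET_ZERO_DATA releases the
	 * backing allocation for the zeroed range instead of writing zeroes,
	 * which is what punching a hole requires.
	 */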
Christoph Probsta205d502019-05-08 21:36:25 +02002995 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05002996
2997 fsctl_buf.FileOffset = cpu_to_le64(offset);
2998 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
2999
3000 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3001 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01003002 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05003003 sizeof(struct file_zero_data_information),
3004 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05003005 free_xid(xid);
3006 return rc;
3007}
3008
Steve French9ccf3212014-10-18 17:01:15 -05003009static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
3010 loff_t off, loff_t len, bool keep_size)
3011{
3012 struct inode *inode;
3013 struct cifsInodeInfo *cifsi;
3014 struct cifsFileInfo *cfile = file->private_data;
3015 long rc = -EOPNOTSUPP;
3016 unsigned int xid;
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10003017 __le64 eof;
Steve French9ccf3212014-10-18 17:01:15 -05003018
3019 xid = get_xid();
3020
David Howells2b0143b2015-03-17 22:25:59 +00003021 inode = d_inode(cfile->dentry);
Steve French9ccf3212014-10-18 17:01:15 -05003022 cifsi = CIFS_I(inode);
3023
Steve French779ede02019-03-13 01:41:49 -05003024 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3025 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003026	/* if the file is not oplocked, we can't be sure whether we are being asked to extend the size */
3027 if (!CIFS_CACHE_READ(cifsi))
Steve Frenchcfe89092018-05-19 02:04:55 -05003028 if (keep_size == false) {
Steve French779ede02019-03-13 01:41:49 -05003029 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3030 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05003031 free_xid(xid);
3032 return rc;
3033 }
Steve French9ccf3212014-10-18 17:01:15 -05003034
3035	/*
3036	 * Files are non-sparse by default, so falloc may be a no-op.
3037	 * Must check if the file is sparse; if it is not sparse and we are
3038	 * not extending it, nothing needs to be done since it is already allocated.
3039	 */
3040 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
3041 if (keep_size == true)
Steve Frenchcfe89092018-05-19 02:04:55 -05003042 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05003043 /* check if extending file */
3044 else if (i_size_read(inode) >= off + len)
3045 /* not extending file and already not sparse */
Steve Frenchcfe89092018-05-19 02:04:55 -05003046 rc = 0;
Steve French9ccf3212014-10-18 17:01:15 -05003047 /* BB: in future add else clause to extend file */
3048 else
Steve Frenchcfe89092018-05-19 02:04:55 -05003049 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05003050 if (rc)
3051 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3052 tcon->tid, tcon->ses->Suid, off, len, rc);
3053 else
3054 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
3055 tcon->tid, tcon->ses->Suid, off, len);
Steve Frenchcfe89092018-05-19 02:04:55 -05003056 free_xid(xid);
3057 return rc;
Steve French9ccf3212014-10-18 17:01:15 -05003058 }
3059
3060 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
3061		/*
3062		 * Check that the falloc range starts within the first few pages
3063		 * of the file and ends within a few pages of the end of file,
3064		 * so that most of the file is being fallocated now. If so,
3065		 * clearing the sparse flag on the whole file (i.e. potentially
3066		 * making a few extra pages at the beginning or end of the file
3067		 * non-sparse via set_sparse) is harmless.
3068		 */
Steve Frenchcfe89092018-05-19 02:04:55 -05003069 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3070 rc = -EOPNOTSUPP;
Steve French779ede02019-03-13 01:41:49 -05003071 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3072 tcon->tid, tcon->ses->Suid, off, len, rc);
Steve Frenchcfe89092018-05-19 02:04:55 -05003073 free_xid(xid);
3074 return rc;
3075 }
Steve French9ccf3212014-10-18 17:01:15 -05003076
Ronnie Sahlbergf1699472019-03-15 00:08:48 +10003077 smb2_set_sparse(xid, tcon, cfile, inode, false);
3078 rc = 0;
3079 } else {
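		/*
		 * For the extending case, allocation is approximated by
		 * clearing the sparse attribute (so the server backs the file
		 * with real allocation) and then, if needed, moving EOF.
		 */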
3080 smb2_set_sparse(xid, tcon, cfile, inode, false);
3081 rc = 0;
3082 if (i_size_read(inode) < off + len) {
3083 eof = cpu_to_le64(off + len);
3084 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3085 cfile->fid.volatile_fid, cfile->pid,
3086 &eof);
3087 }
Steve French9ccf3212014-10-18 17:01:15 -05003088 }
Steve French9ccf3212014-10-18 17:01:15 -05003089
Steve French779ede02019-03-13 01:41:49 -05003090 if (rc)
3091 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3092 tcon->ses->Suid, off, len, rc);
3093 else
3094 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3095 tcon->ses->Suid, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003096
3097 free_xid(xid);
3098 return rc;
3099}
3100
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10003101static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
3102{
3103 struct cifsFileInfo *wrcfile, *cfile = file->private_data;
3104 struct cifsInodeInfo *cifsi;
3105 struct inode *inode;
3106 int rc = 0;
3107 struct file_allocated_range_buffer in_data, *out_data = NULL;
3108 u32 out_data_len;
3109 unsigned int xid;
3110
3111 if (whence != SEEK_HOLE && whence != SEEK_DATA)
3112 return generic_file_llseek(file, offset, whence);
3113
3114 inode = d_inode(cfile->dentry);
3115 cifsi = CIFS_I(inode);
3116
3117 if (offset < 0 || offset >= i_size_read(inode))
3118 return -ENXIO;
3119
3120 xid = get_xid();
3121 /*
3122 * We need to be sure that all dirty pages are written as they
3123 * might fill holes on the server.
3124 * Note that we also MUST flush any written pages since at least
3125 * some servers (Windows2016) will not reflect recent writes in
3126 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
3127 */
3128 wrcfile = find_writable_file(cifsi, false);
3129 if (wrcfile) {
3130 filemap_write_and_wait(inode->i_mapping);
3131 smb2_flush_file(xid, tcon, &wrcfile->fid);
3132 cifsFileInfo_put(wrcfile);
3133 }
3134
3135 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
3136 if (whence == SEEK_HOLE)
3137 offset = i_size_read(inode);
3138 goto lseek_exit;
3139 }
3140
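	/*
	 * FSCTL_QUERY_ALLOCATED_RANGES returns the allocated (non-hole) ranges
	 * that intersect the queried range. Only the first such range is
	 * needed here, so a single-entry output buffer is enough; -E2BIG just
	 * means more ranges exist beyond it.
	 */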
3141 in_data.file_offset = cpu_to_le64(offset);
3142 in_data.length = cpu_to_le64(i_size_read(inode));
3143
3144 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3145 cfile->fid.volatile_fid,
3146 FSCTL_QUERY_ALLOCATED_RANGES, true,
3147 (char *)&in_data, sizeof(in_data),
3148 sizeof(struct file_allocated_range_buffer),
3149 (char **)&out_data, &out_data_len);
3150 if (rc == -E2BIG)
3151 rc = 0;
3152 if (rc)
3153 goto lseek_exit;
3154
3155 if (whence == SEEK_HOLE && out_data_len == 0)
3156 goto lseek_exit;
3157
3158 if (whence == SEEK_DATA && out_data_len == 0) {
3159 rc = -ENXIO;
3160 goto lseek_exit;
3161 }
3162
3163 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3164 rc = -EINVAL;
3165 goto lseek_exit;
3166 }
3167 if (whence == SEEK_DATA) {
3168 offset = le64_to_cpu(out_data->file_offset);
3169 goto lseek_exit;
3170 }
3171 if (offset < le64_to_cpu(out_data->file_offset))
3172 goto lseek_exit;
3173
3174 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
3175
3176 lseek_exit:
3177 free_xid(xid);
3178 kfree(out_data);
3179 if (!rc)
3180 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3181 else
3182 return rc;
3183}
3184
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003185static int smb3_fiemap(struct cifs_tcon *tcon,
3186 struct cifsFileInfo *cfile,
3187 struct fiemap_extent_info *fei, u64 start, u64 len)
3188{
3189 unsigned int xid;
3190 struct file_allocated_range_buffer in_data, *out_data;
3191 u32 out_data_len;
3192 int i, num, rc, flags, last_blob;
3193 u64 next;
3194
3195 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
3196 return -EBADR;
3197
3198 xid = get_xid();
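	/*
	 * Each FSCTL_QUERY_ALLOCATED_RANGES call below returns at most 1024
	 * ranges; -E2BIG means the output was truncated, so loop and restart
	 * the query just past the last range received.
	 */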
3199 again:
3200 in_data.file_offset = cpu_to_le64(start);
3201 in_data.length = cpu_to_le64(len);
3202
3203 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3204 cfile->fid.volatile_fid,
3205 FSCTL_QUERY_ALLOCATED_RANGES, true,
3206 (char *)&in_data, sizeof(in_data),
3207 1024 * sizeof(struct file_allocated_range_buffer),
3208 (char **)&out_data, &out_data_len);
3209 if (rc == -E2BIG) {
3210 last_blob = 0;
3211 rc = 0;
3212 } else
3213 last_blob = 1;
3214 if (rc)
3215 goto out;
3216
3217 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3218 rc = -EINVAL;
3219 goto out;
3220 }
3221 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3222 rc = -EINVAL;
3223 goto out;
3224 }
3225
3226 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3227 for (i = 0; i < num; i++) {
3228 flags = 0;
3229 if (i == num - 1 && last_blob)
3230 flags |= FIEMAP_EXTENT_LAST;
3231
3232 rc = fiemap_fill_next_extent(fei,
3233 le64_to_cpu(out_data[i].file_offset),
3234 le64_to_cpu(out_data[i].file_offset),
3235 le64_to_cpu(out_data[i].length),
3236 flags);
3237 if (rc < 0)
3238 goto out;
3239 if (rc == 1) {
3240 rc = 0;
3241 goto out;
3242 }
3243 }
3244
3245 if (!last_blob) {
3246 next = le64_to_cpu(out_data[num - 1].file_offset) +
3247 le64_to_cpu(out_data[num - 1].length);
3248 len = len - (next - start);
3249 start = next;
3250 goto again;
3251 }
3252
3253 out:
3254 free_xid(xid);
3255 kfree(out_data);
3256 return rc;
3257}
Steve French9ccf3212014-10-18 17:01:15 -05003258
Steve French31742c52014-08-17 08:38:47 -05003259static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3260 loff_t off, loff_t len)
3261{
3262	/* KEEP_SIZE already checked for by vfs_fallocate */
3263 if (mode & FALLOC_FL_PUNCH_HOLE)
3264 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003265 else if (mode & FALLOC_FL_ZERO_RANGE) {
3266 if (mode & FALLOC_FL_KEEP_SIZE)
3267 return smb3_zero_range(file, tcon, off, len, true);
3268 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003269 } else if (mode == FALLOC_FL_KEEP_SIZE)
3270 return smb3_simple_falloc(file, tcon, off, len, true);
3271 else if (mode == 0)
3272 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003273
3274 return -EOPNOTSUPP;
3275}
3276
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003277static void
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003278smb2_downgrade_oplock(struct TCP_Server_Info *server,
3279 struct cifsInodeInfo *cinode, bool set_level2)
3280{
3281 if (set_level2)
3282 server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
3283 0, NULL);
3284 else
3285 server->ops->set_oplock_level(cinode, 0, 0, NULL);
3286}
3287
3288static void
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003289smb21_downgrade_oplock(struct TCP_Server_Info *server,
3290 struct cifsInodeInfo *cinode, bool set_level2)
3291{
3292 server->ops->set_oplock_level(cinode,
3293 set_level2 ? SMB2_LEASE_READ_CACHING_HE :
3294 0, 0, NULL);
3295}
3296
3297static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003298smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3299 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003300{
3301 oplock &= 0xFF;
3302 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3303 return;
3304 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003305 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003306 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3307 &cinode->vfs_inode);
3308 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003309 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003310 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3311 &cinode->vfs_inode);
3312 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3313 cinode->oplock = CIFS_CACHE_READ_FLG;
3314 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3315 &cinode->vfs_inode);
3316 } else
3317 cinode->oplock = 0;
3318}
3319
3320static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003321smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3322 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003323{
3324 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003325 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003326
3327 oplock &= 0xFF;
3328 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3329 return;
3330
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003331 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003332 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003333 strcat(message, "R");
3334 }
3335 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003336 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003337 strcat(message, "H");
3338 }
3339 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003340 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003341 strcat(message, "W");
3342 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003343 if (!new_oplock)
3344 strncpy(message, "None", sizeof(message));
3345
3346 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003347 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3348 &cinode->vfs_inode);
3349}
3350
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003351static void
3352smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3353 unsigned int epoch, bool *purge_cache)
3354{
3355 unsigned int old_oplock = cinode->oplock;
3356
3357 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
3358
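	/*
	 * The server bumps the lease epoch on every lease state change. If the
	 * epoch advanced further than the observed state transition explains,
	 * another client may have modified the file, so the cached pages are
	 * treated as stale and purged.
	 */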
3359 if (purge_cache) {
3360 *purge_cache = false;
3361 if (old_oplock == CIFS_CACHE_READ_FLG) {
3362 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
3363 (epoch - cinode->epoch > 0))
3364 *purge_cache = true;
3365 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3366 (epoch - cinode->epoch > 1))
3367 *purge_cache = true;
3368 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3369 (epoch - cinode->epoch > 1))
3370 *purge_cache = true;
3371 else if (cinode->oplock == 0 &&
3372 (epoch - cinode->epoch > 0))
3373 *purge_cache = true;
3374 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
3375 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3376 (epoch - cinode->epoch > 0))
3377 *purge_cache = true;
3378 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3379 (epoch - cinode->epoch > 1))
3380 *purge_cache = true;
3381 }
3382 cinode->epoch = epoch;
3383 }
3384}
3385
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003386static bool
3387smb2_is_read_op(__u32 oplock)
3388{
3389 return oplock == SMB2_OPLOCK_LEVEL_II;
3390}
3391
3392static bool
3393smb21_is_read_op(__u32 oplock)
3394{
3395 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3396 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3397}
3398
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003399static __le32
3400map_oplock_to_lease(u8 oplock)
3401{
3402 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3403 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3404 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3405 return SMB2_LEASE_READ_CACHING;
3406 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3407 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3408 SMB2_LEASE_WRITE_CACHING;
3409 return 0;
3410}
3411
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003412static char *
3413smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3414{
3415 struct create_lease *buf;
3416
3417 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3418 if (!buf)
3419 return NULL;
3420
Stefano Brivio729c0c92018-07-05 15:10:02 +02003421 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003422 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003423
3424 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3425 (struct create_lease, lcontext));
3426 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3427 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3428 (struct create_lease, Name));
3429 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003430 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003431 buf->Name[0] = 'R';
3432 buf->Name[1] = 'q';
3433 buf->Name[2] = 'L';
3434 buf->Name[3] = 's';
3435 return (char *)buf;
3436}
3437
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003438static char *
3439smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3440{
3441 struct create_lease_v2 *buf;
3442
3443 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3444 if (!buf)
3445 return NULL;
3446
Stefano Brivio729c0c92018-07-05 15:10:02 +02003447 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003448 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3449
3450 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3451 (struct create_lease_v2, lcontext));
3452 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3453 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3454 (struct create_lease_v2, Name));
3455 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003456 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003457 buf->Name[0] = 'R';
3458 buf->Name[1] = 'q';
3459 buf->Name[2] = 'L';
3460 buf->Name[3] = 's';
3461 return (char *)buf;
3462}
3463
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003464static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003465smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003466{
3467 struct create_lease *lc = (struct create_lease *)buf;
3468
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003469 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003470 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3471 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3472 return le32_to_cpu(lc->lcontext.LeaseState);
3473}
3474
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003475static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003476smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003477{
3478 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3479
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003480 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003481 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3482 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003483 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003484 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003485 return le32_to_cpu(lc->lcontext.LeaseState);
3486}
3487
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003488static unsigned int
3489smb2_wp_retry_size(struct inode *inode)
3490{
3491 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3492 SMB2_MAX_BUFFER_SIZE);
3493}
3494
Pavel Shilovsky52755802014-08-18 20:49:57 +04003495static bool
3496smb2_dir_needs_close(struct cifsFileInfo *cfile)
3497{
3498 return !cfile->invalidHandle;
3499}
3500
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003501static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003502fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05003503 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003504{
3505 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003506 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003507
3508 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3509 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3510 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3511 tr_hdr->Flags = cpu_to_le16(0x01);
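	/*
	 * Flags value 0x01 marks the message as encrypted (the same field is
	 * EncryptionAlgorithm in SMB 3.0/3.0.2). GCM uses a 12-byte nonce and
	 * CCM an 11-byte nonce; the rest of the 16-byte Nonce field stays zero
	 * from the memset above.
	 */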
Steve French2b2f7542019-06-07 15:16:10 -05003512 if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3513 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3514 else
3515 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003516 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003517}
3518
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11003519/* We can not use the normal sg_set_buf() as we will sometimes pass a
3520 * stack object as buf.
3521 */
3522static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
3523 unsigned int buflen)
3524{
Sebastien Tisserantee9d6612019-08-01 12:06:08 -05003525 void *addr;
3526 /*
3527 * VMAP_STACK (at least) puts stack into the vmalloc address space
3528 */
3529 if (is_vmalloc_addr(buf))
3530 addr = vmalloc_to_page(buf);
3531 else
3532 addr = virt_to_page(buf);
3533 sg_set_page(sg, addr, buflen, offset_in_page(buf));
Ronnie Sahlberg262916b2018-02-20 12:45:21 +11003534}
3535
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003536/* Assumes the first rqst has a transform header as the first iov.
3537 * I.e.
3538 * rqst[0].rq_iov[0] is transform header
3539 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
3540 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003541 */
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003542static struct scatterlist *
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003543init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003544{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003545 unsigned int sg_len;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003546 struct scatterlist *sg;
3547 unsigned int i;
3548 unsigned int j;
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003549 unsigned int idx = 0;
3550 int skip;
3551
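	/* one extra sg entry is reserved for the signature appended at the end */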
3552 sg_len = 1;
3553 for (i = 0; i < num_rqst; i++)
3554 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003555
3556 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
3557 if (!sg)
3558 return NULL;
3559
3560 sg_init_table(sg, sg_len);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003561 for (i = 0; i < num_rqst; i++) {
3562 for (j = 0; j < rqst[i].rq_nvec; j++) {
3563 /*
3564 * The first rqst has a transform header where the
3565 * first 20 bytes are not part of the encrypted blob
3566 */
3567 skip = (i == 0) && (j == 0) ? 20 : 0;
3568 smb2_sg_set_buf(&sg[idx++],
3569 rqst[i].rq_iov[j].iov_base + skip,
3570 rqst[i].rq_iov[j].iov_len - skip);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10003571 }
Steve Frenchd5f07fb2018-06-05 17:46:24 -05003572
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003573 for (j = 0; j < rqst[i].rq_npages; j++) {
3574 unsigned int len, offset;
3575
3576 rqst_page_get_length(&rqst[i], j, &len, &offset);
3577 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
3578 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003579 }
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003580 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003581 return sg;
3582}
3583
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003584static int
3585smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3586{
3587 struct cifs_ses *ses;
3588 u8 *ses_enc_key;
3589
3590 spin_lock(&cifs_tcp_ses_lock);
3591 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3592 if (ses->Suid != ses_id)
3593 continue;
3594 ses_enc_key = enc ? ses->smb3encryptionkey :
3595 ses->smb3decryptionkey;
3596 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3597 spin_unlock(&cifs_tcp_ses_lock);
3598 return 0;
3599 }
3600 spin_unlock(&cifs_tcp_ses_lock);
3601
3602 return 1;
3603}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003604/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003605 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3606 * iov[0] - transform header (associate data),
3607 * iov[1-N] - SMB2 header and pages - data to encrypt.
3608 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003609 * untouched.
3610 */
3611static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003612crypt_message(struct TCP_Server_Info *server, int num_rqst,
3613 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003614{
3615 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003616 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
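	/*
	 * The AEAD associated data is the transform header minus the 4-byte
	 * ProtocolId and 16-byte Signature, i.e. everything from the Nonce
	 * onward, hence the 20 bytes subtracted below.
	 */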
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003617 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003618 int rc = 0;
3619 struct scatterlist *sg;
3620 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003621 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003622 struct aead_request *req;
3623 char *iv;
3624 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003625 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003626 struct crypto_aead *tfm;
3627 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3628
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003629 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3630 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003631 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003632 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003633 return 0;
3634 }
3635
3636 rc = smb3_crypto_aead_allocate(server);
3637 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003638 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003639 return rc;
3640 }
3641
3642 tfm = enc ? server->secmech.ccmaesencrypt :
3643 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003644 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003645 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003646 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003647 return rc;
3648 }
3649
3650 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3651 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003652 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003653 return rc;
3654 }
3655
3656 req = aead_request_alloc(tfm, GFP_KERNEL);
3657 if (!req) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003658 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003659 return -ENOMEM;
3660 }
3661
3662 if (!enc) {
3663 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3664 crypt_len += SMB2_SIGNATURE_SIZE;
3665 }
3666
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003667 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003668 if (!sg) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003669 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003670 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003671 goto free_req;
3672 }
3673
3674 iv_len = crypto_aead_ivsize(tfm);
3675 iv = kzalloc(iv_len, GFP_KERNEL);
3676 if (!iv) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003677 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003678 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003679 goto free_sg;
3680 }
Steve French2b2f7542019-06-07 15:16:10 -05003681
3682 if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3683 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3684 else {
3685 iv[0] = 3;
3686 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
3687 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003688
3689 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3690 aead_request_set_ad(req, assoc_data_len);
3691
3692 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003693 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003694
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003695 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3696 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003697
3698 if (!rc && enc)
3699 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3700
3701 kfree(iv);
3702free_sg:
3703 kfree(sg);
3704free_req:
3705 kfree(req);
3706 return rc;
3707}
3708
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003709void
3710smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003711{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003712 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003713
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003714 for (i = 0; i < num_rqst; i++) {
3715 if (rqst[i].rq_pages) {
3716 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3717 put_page(rqst[i].rq_pages[j]);
3718 kfree(rqst[i].rq_pages);
3719 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003720 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003721}
3722
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003723/*
3724 * This function will initialize new_rq and encrypt the content.
3725 * The first entry, new_rq[0], only contains a single iov which contains
3726 * a smb2_transform_hdr and is pre-allocated by the caller.
3727 * This function then populates new_rq[1+] with the content from olq_rq[0+].
3728 *
3729 * The end result is an array of smb_rqst structures where the first structure
3730 * only contains a single iov for the transform header which we then can pass
3731 * to crypt_message().
3732 *
3733 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3734 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3735 */
3736static int
3737smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3738 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003739{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003740 struct page **pages;
3741 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3742 unsigned int npages;
3743 unsigned int orig_len = 0;
3744 int i, j;
3745 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003746
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003747 for (i = 1; i < num_rqst; i++) {
3748 npages = old_rq[i - 1].rq_npages;
3749 pages = kmalloc_array(npages, sizeof(struct page *),
3750 GFP_KERNEL);
3751 if (!pages)
3752 goto err_free;
3753
3754 new_rq[i].rq_pages = pages;
3755 new_rq[i].rq_npages = npages;
3756 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3757 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3758 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3759 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3760 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3761
3762 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3763
3764 for (j = 0; j < npages; j++) {
3765 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3766 if (!pages[j])
3767 goto err_free;
3768 }
3769
3770		/* copy pages from the old request */
3771 for (j = 0; j < npages; j++) {
3772 char *dst, *src;
3773 unsigned int offset, len;
3774
3775 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3776
3777 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3778 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3779
3780 memcpy(dst, src, len);
3781 kunmap(new_rq[i].rq_pages[j]);
3782 kunmap(old_rq[i - 1].rq_pages[j]);
3783 }
3784 }
3785
3786 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05003787 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003788
3789 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02003790 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003791 if (rc)
3792 goto err_free;
3793
3794 return rc;
3795
3796err_free:
3797 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3798 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003799}
3800
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003801static int
3802smb3_is_transform_hdr(void *buf)
3803{
3804 struct smb2_transform_hdr *trhdr = buf;
3805
3806 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3807}
3808
3809static int
3810decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3811 unsigned int buf_data_size, struct page **pages,
3812 unsigned int npages, unsigned int page_data_size)
3813{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003814 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003815 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003816 int rc;
3817
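	/*
	 * iov[0] carries the transform header (the AEAD associated data) and
	 * iov[1] the encrypted SMB2 PDU, which crypt_message() decrypts in
	 * place; the memmove below then drops the transform header so callers
	 * see a plain SMB2 response.
	 */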
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003818 iov[0].iov_base = buf;
3819 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3820 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3821 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003822
3823 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003824 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003825 rqst.rq_pages = pages;
3826 rqst.rq_npages = npages;
3827 rqst.rq_pagesz = PAGE_SIZE;
3828 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3829
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003830 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02003831 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003832
3833 if (rc)
3834 return rc;
3835
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003836 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003837
3838 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003839
3840 return rc;
3841}
3842
3843static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003844read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3845 unsigned int npages, unsigned int len)
3846{
3847 int i;
3848 int length;
3849
3850 for (i = 0; i < npages; i++) {
3851 struct page *page = pages[i];
3852 size_t n;
3853
3854 n = len;
3855 if (len >= PAGE_SIZE) {
3856 /* enough data to fill the page */
3857 n = PAGE_SIZE;
3858 len -= n;
3859 } else {
3860 zero_user(page, len, PAGE_SIZE - len);
3861 len = 0;
3862 }
Long Li1dbe3462018-05-30 12:47:55 -07003863 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003864 if (length < 0)
3865 return length;
3866 server->total_read += length;
3867 }
3868
3869 return 0;
3870}
3871
3872static int
3873init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3874 unsigned int cur_off, struct bio_vec **page_vec)
3875{
3876 struct bio_vec *bvec;
3877 int i;
3878
3879 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3880 if (!bvec)
3881 return -ENOMEM;
3882
3883 for (i = 0; i < npages; i++) {
3884 bvec[i].bv_page = pages[i];
3885 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3886 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3887 data_size -= bvec[i].bv_len;
3888 }
3889
3890 if (data_size != 0) {
3891 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3892 kfree(bvec);
3893 return -EIO;
3894 }
3895
3896 *page_vec = bvec;
3897 return 0;
3898}
3899
3900static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003901handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3902 char *buf, unsigned int buf_len, struct page **pages,
3903 unsigned int npages, unsigned int page_data_size)
3904{
3905 unsigned int data_offset;
3906 unsigned int data_len;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003907 unsigned int cur_off;
3908 unsigned int cur_page_idx;
3909 unsigned int pad_len;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003910 struct cifs_readdata *rdata = mid->callback_data;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10003911 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003912 struct bio_vec *bvec = NULL;
3913 struct iov_iter iter;
3914 struct kvec iov;
3915 int length;
Long Li74dcf412017-11-22 17:38:46 -07003916 bool use_rdma_mr = false;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003917
3918 if (shdr->Command != SMB2_READ) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003919 cifs_server_dbg(VFS, "only big read responses are supported\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003920 return -ENOTSUPP;
3921 }
3922
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07003923 if (server->ops->is_session_expired &&
3924 server->ops->is_session_expired(buf)) {
3925 cifs_reconnect(server);
3926 wake_up(&server->response_q);
3927 return -1;
3928 }
3929
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003930 if (server->ops->is_status_pending &&
Pavel Shilovsky66265f12019-01-23 17:11:16 -08003931 server->ops->is_status_pending(buf, server))
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003932 return -1;
3933
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003934 /* set up first two iov to get credits */
3935 rdata->iov[0].iov_base = buf;
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003936 rdata->iov[0].iov_len = 0;
3937 rdata->iov[1].iov_base = buf;
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003938 rdata->iov[1].iov_len =
Pavel Shilovskybb1bccb2019-01-17 16:18:38 -08003939 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003940 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3941 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3942 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3943 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3944
3945 rdata->result = server->ops->map_error(buf, true);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003946 if (rdata->result != 0) {
3947 cifs_dbg(FYI, "%s: server returned error %d\n",
3948 __func__, rdata->result);
Pavel Shilovskyec678ea2019-01-18 15:38:11 -08003949 /* normal error on read response */
3950 dequeue_mid(mid, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003951 return 0;
3952 }
3953
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003954 data_offset = server->ops->read_data_offset(buf);
Long Li74dcf412017-11-22 17:38:46 -07003955#ifdef CONFIG_CIFS_SMB_DIRECT
3956 use_rdma_mr = rdata->mr;
3957#endif
3958 data_len = server->ops->read_data_length(buf, use_rdma_mr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003959
3960 if (data_offset < server->vals->read_rsp_size) {
3961 /*
3962 * win2k8 sometimes sends an offset of 0 when the read
3963 * is beyond the EOF. Treat it as if the data starts just after
3964 * the header.
3965 */
3966 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
3967 __func__, data_offset);
3968 data_offset = server->vals->read_rsp_size;
3969 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
3970 /* data_offset is beyond the end of smallbuf */
3971 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
3972 __func__, data_offset);
3973 rdata->result = -EIO;
3974 dequeue_mid(mid, rdata->result);
3975 return 0;
3976 }
3977
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003978 pad_len = data_offset - server->vals->read_rsp_size;
3979
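	/*
	 * pad_len is the padding the server inserted between the read response
	 * header and the payload. When the payload was received into pages,
	 * that padding sits at the start of the first page and must be skipped.
	 */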
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003980 if (buf_len <= data_offset) {
3981 /* read response payload is in pages */
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003982 cur_page_idx = pad_len / PAGE_SIZE;
3983 cur_off = pad_len % PAGE_SIZE;
3984
3985 if (cur_page_idx != 0) {
3986 /* data offset is beyond the 1st page of response */
3987 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
3988 __func__, data_offset);
3989 rdata->result = -EIO;
3990 dequeue_mid(mid, rdata->result);
3991 return 0;
3992 }
3993
3994 if (data_len > page_data_size - pad_len) {
3995 /* data_len is corrupt -- discard frame */
3996 rdata->result = -EIO;
3997 dequeue_mid(mid, rdata->result);
3998 return 0;
3999 }
4000
4001 rdata->result = init_read_bvec(pages, npages, page_data_size,
4002 cur_off, &bvec);
4003 if (rdata->result != 0) {
4004 dequeue_mid(mid, rdata->result);
4005 return 0;
4006 }
4007
David Howellsaa563d72018-10-20 00:57:56 +01004008 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004009 } else if (buf_len >= data_offset + data_len) {
4010 /* read response payload is in buf */
4011 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
4012 iov.iov_base = buf + data_offset;
4013 iov.iov_len = data_len;
David Howellsaa563d72018-10-20 00:57:56 +01004014 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004015 } else {
4016 /* read response payload cannot be in both buf and pages */
4017 WARN_ONCE(1, "buf can not contain only a part of read data");
4018 rdata->result = -EIO;
4019 dequeue_mid(mid, rdata->result);
4020 return 0;
4021 }
4022
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004023 length = rdata->copy_into_pages(server, rdata, &iter);
4024
4025 kfree(bvec);
4026
4027 if (length < 0)
4028 return length;
4029
4030 dequeue_mid(mid, false);
4031 return length;
4032}
4033
Steve French35cf94a2019-09-07 01:09:49 -05004034struct smb2_decrypt_work {
4035 struct work_struct decrypt;
4036 struct TCP_Server_Info *server;
4037 struct page **ppages;
4038 char *buf;
4039 unsigned int npages;
4040 unsigned int len;
4041};
4042
4043
4044static void smb2_decrypt_offload(struct work_struct *work)
4045{
4046 struct smb2_decrypt_work *dw = container_of(work,
4047 struct smb2_decrypt_work, decrypt);
4048 int i, rc;
4049 struct mid_q_entry *mid;
4050
4051 rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
4052 dw->ppages, dw->npages, dw->len);
4053 if (rc) {
4054 cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
4055 goto free_pages;
4056 }
4057
Steve French22553972019-09-13 16:47:31 -05004058 dw->server->lstrp = jiffies;
Steve French35cf94a2019-09-07 01:09:49 -05004059 mid = smb2_find_mid(dw->server, dw->buf);
4060 if (mid == NULL)
4061 cifs_dbg(FYI, "mid not found\n");
4062 else {
4063 mid->decrypted = true;
4064 rc = handle_read_data(dw->server, mid, dw->buf,
4065 dw->server->vals->read_rsp_size,
4066 dw->ppages, dw->npages, dw->len);
Steve French22553972019-09-13 16:47:31 -05004067 mid->callback(mid);
4068 cifs_mid_q_entry_release(mid);
Steve French35cf94a2019-09-07 01:09:49 -05004069 }
4070
Steve French35cf94a2019-09-07 01:09:49 -05004071free_pages:
4072 for (i = dw->npages-1; i >= 0; i--)
4073 put_page(dw->ppages[i]);
4074
4075 kfree(dw->ppages);
4076 cifs_small_buf_release(dw->buf);
4077}
4078
4079
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004080static int
Steve French35cf94a2019-09-07 01:09:49 -05004081receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
4082 int *num_mids)
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004083{
4084 char *buf = server->smallbuf;
4085 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4086 unsigned int npages;
4087 struct page **pages;
4088 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004089 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004090 int rc;
4091 int i = 0;
Steve French35cf94a2019-09-07 01:09:49 -05004092 struct smb2_decrypt_work *dw;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004093
Steve French35cf94a2019-09-07 01:09:49 -05004094 *num_mids = 1;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004095 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004096 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4097
4098 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4099 if (rc < 0)
4100 return rc;
4101 server->total_read += rc;
4102
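	/*
	 * OriginalMessageSize in the transform header is the decrypted message
	 * size; everything beyond the READ response header is payload and is
	 * read into separately allocated pages below (e.g. a 1 MiB payload
	 * needs 256 pages on systems with 4 KiB pages).
	 */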
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004103 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11004104 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004105 npages = DIV_ROUND_UP(len, PAGE_SIZE);
4106
4107 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
4108 if (!pages) {
4109 rc = -ENOMEM;
4110 goto discard_data;
4111 }
4112
4113 for (; i < npages; i++) {
4114 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4115 if (!pages[i]) {
4116 rc = -ENOMEM;
4117 goto discard_data;
4118 }
4119 }
4120
	/* read the READ response payload from the socket into the pages */
4122 rc = read_data_into_pages(server, pages, npages, len);
4123 if (rc)
4124 goto free_pages;
4125
Pavel Shilovsky350be252017-04-10 10:31:33 -07004126 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004127 if (rc)
4128 goto free_pages;
4129
	/*
	 * For large reads, offload decryption to a worker thread so that more
	 * cores can be used; decrypting a large payload can be expensive.
	 * Offload only when it is enabled (min_offload is set), the response
	 * is at least min_offload bytes and other requests are in flight;
	 * otherwise decrypt inline below.
	 */
4134
Steve French10328c42019-09-09 13:30:15 -05004135 if ((server->min_offload) && (server->in_flight > 1) &&
Steve French563317e2019-09-08 23:22:02 -05004136 (server->pdu_size >= server->min_offload)) {
Steve French35cf94a2019-09-07 01:09:49 -05004137 dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
4138 if (dw == NULL)
4139 goto non_offloaded_decrypt;
4140
4141 dw->buf = server->smallbuf;
4142 server->smallbuf = (char *)cifs_small_buf_get();
4143
4144 INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
4145
4146 dw->npages = npages;
4147 dw->server = server;
4148 dw->ppages = pages;
4149 dw->len = len;
4150 queue_work(cifsiod_wq, &dw->decrypt);
4151 *num_mids = 0; /* worker thread takes care of finding mid */
4152 return -1;
4153 }
4154
4155non_offloaded_decrypt:
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004156 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004157 pages, npages, len);
4158 if (rc)
4159 goto free_pages;
4160
4161 *mid = smb2_find_mid(server, buf);
4162 if (*mid == NULL)
4163 cifs_dbg(FYI, "mid not found\n");
4164 else {
4165 cifs_dbg(FYI, "mid found\n");
4166 (*mid)->decrypted = true;
4167 rc = handle_read_data(server, *mid, buf,
4168 server->vals->read_rsp_size,
4169 pages, npages, len);
4170 }
4171
4172free_pages:
4173 for (i = i - 1; i >= 0; i--)
4174 put_page(pages[i]);
4175 kfree(pages);
4176 return rc;
4177discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07004178 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004179 goto free_pages;
4180}
4181
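/*
 * A decrypted compound response is a chain of SMB2 PDUs.  Each sync header's
 * NextCommand holds the offset from the start of that header to the next
 * PDU; the last PDU in the chain has NextCommand == 0.  For example, three
 * chained PDUs of 80, 96 and 64 bytes carry NextCommand values of 80, 96 and
 * 0 respectively.  The loop below walks this chain, copying each following
 * PDU into its own buffer and dispatching the current one to its mid.
 */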
4182static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004183receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004184 struct mid_q_entry **mids, char **bufs,
4185 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004186{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004187 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004188 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004189 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004190 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004191 unsigned int buf_size;
4192 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004193 int next_is_large;
4194 char *next_buffer = NULL;
4195
4196 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004197
4198 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004199 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004200 server->large_buf = true;
4201 memcpy(server->bigbuf, buf, server->total_read);
4202 buf = server->bigbuf;
4203 }
4204
4205 /* now read the rest */
4206 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004207 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004208 if (length < 0)
4209 return length;
4210 server->total_read += length;
4211
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004212 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004213 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
4214 if (length)
4215 return length;
4216
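	/*
	 * next_is_large tracks whether the buffers used for the chained PDUs
	 * are large or small response buffers, so that the matching server
	 * buffer pointer (bigbuf vs smallbuf) is updated when the loop
	 * advances and the matching pool is used if next_buffer must be freed.
	 */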
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004217 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004218one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004219 shdr = (struct smb2_sync_hdr *)buf;
4220 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004221 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004222 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004223 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004224 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004225 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004226 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004227 pdu_length - le32_to_cpu(shdr->NextCommand));
4228 }
4229
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004230 mid_entry = smb2_find_mid(server, buf);
4231 if (mid_entry == NULL)
4232 cifs_dbg(FYI, "mid not found\n");
4233 else {
4234 cifs_dbg(FYI, "mid found\n");
4235 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004236 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004237 }
4238
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004239 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004240 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004241 return -1;
4242 }
4243 bufs[*num_mids] = buf;
4244 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004245
4246 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004247 ret = mid_entry->handle(server, mid_entry);
4248 else
4249 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004250
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004251 if (ret == 0 && shdr->NextCommand) {
4252 pdu_length -= le32_to_cpu(shdr->NextCommand);
4253 server->large_buf = next_is_large;
4254 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004255 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004256 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004257 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004258 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004259 } else if (ret != 0) {
4260 /*
4261 * ret != 0 here means that we didn't get to handle_mid() thus
4262 * server->smallbuf and server->bigbuf are still valid. We need
4263 * to free next_buffer because it is not going to be used
4264 * anywhere.
4265 */
4266 if (next_is_large)
4267 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4268 else
4269 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004270 }
4271
4272 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004273}
4274
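/*
 * Entry point for an encrypted (SMB3 transform) message read from the
 * socket.  Messages larger than the biggest non-READ response (CIFSMaxBufSize
 * plus the maximum header) are assumed to be READ responses and are handed to
 * receive_encrypted_read(), which places the payload in pages; everything
 * else goes through receive_encrypted_standard(), which also handles
 * compounded responses.
 */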
4275static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004276smb3_receive_transform(struct TCP_Server_Info *server,
4277 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004278{
4279 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004280 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004281 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4282 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4283
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004284 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004285 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004286 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004287 pdu_length);
4288 cifs_reconnect(server);
4289 wake_up(&server->response_q);
4290 return -ECONNABORTED;
4291 }
4292
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004293 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004294 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004295 cifs_reconnect(server);
4296 wake_up(&server->response_q);
4297 return -ECONNABORTED;
4298 }
4299
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004300 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004301 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004302 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004303 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004304
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004305 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004306}
4307
4308int
4309smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4310{
4311 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4312
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004313 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004314 NULL, 0, 0);
4315}
4316
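/*
 * Return the offset from buf to the next PDU: the full transform-wrapped
 * length (transform header plus OriginalMessageSize) for an encrypted
 * message, otherwise the sync header's NextCommand (0 when this is the last
 * or only PDU).
 */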
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004317static int
4318smb2_next_header(char *buf)
4319{
4320 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4321 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4322
4323 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4324 return sizeof(struct smb2_transform_hdr) +
4325 le32_to_cpu(t_hdr->OriginalMessageSize);
4326
4327 return le32_to_cpu(hdr->NextCommand);
4328}
4329
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004330static int
4331smb2_make_node(unsigned int xid, struct inode *inode,
4332 struct dentry *dentry, struct cifs_tcon *tcon,
4333 char *full_path, umode_t mode, dev_t dev)
4334{
4335 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4336 int rc = -EPERM;
4337 int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
4338 FILE_ALL_INFO *buf = NULL;
4339 struct cifs_io_parms io_parms;
4340 __u32 oplock = 0;
4341 struct cifs_fid fid;
4342 struct cifs_open_parms oparms;
4343 unsigned int bytes_written;
4344 struct win_dev *pdev;
4345 struct kvec iov[2];
4346
	/*
	 * Check if mounted with the 'sfu' mount option.
	 * SFU emulation should work with all servers, but only
	 * supports block and char devices (no sockets or fifos),
	 * and was used by default in earlier versions of Windows.
	 */
4353 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
4354 goto out;
4355
4356 /*
4357 * TODO: Add ability to create instead via reparse point. Windows (e.g.
4358 * their current NFS server) uses this approach to expose special files
4359 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
4360 */
4361
4362 if (!S_ISCHR(mode) && !S_ISBLK(mode))
4363 goto out;
4364
4365 cifs_dbg(FYI, "sfu compat create special file\n");
4366
4367 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
4368 if (buf == NULL) {
4369 rc = -ENOMEM;
4370 goto out;
4371 }
4372
4373 if (backup_cred(cifs_sb))
4374 create_options |= CREATE_OPEN_BACKUP_INTENT;
4375
4376 oparms.tcon = tcon;
4377 oparms.cifs_sb = cifs_sb;
4378 oparms.desired_access = GENERIC_WRITE;
4379 oparms.create_options = create_options;
4380 oparms.disposition = FILE_CREATE;
4381 oparms.path = full_path;
4382 oparms.fid = &fid;
4383 oparms.reconnect = false;
4384
4385 if (tcon->ses->server->oplocks)
4386 oplock = REQ_OPLOCK;
4387 else
4388 oplock = 0;
4389 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
4390 if (rc)
4391 goto out;
4392
4393 /*
4394 * BB Do not bother to decode buf since no local inode yet to put
4395 * timestamps in, but we can reuse it safely.
4396 */
4397
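	/*
	 * SFU-style special files store the device identity in the first bytes
	 * of the file: an 8-byte tag ("IntxCHR" for character, "IntxBLK" for
	 * block devices) followed by the little-endian 64-bit major and minor
	 * numbers, i.e. the win_dev layout written at offset 0 below.
	 */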
4398 pdev = (struct win_dev *)buf;
4399 io_parms.pid = current->tgid;
4400 io_parms.tcon = tcon;
4401 io_parms.offset = 0;
4402 io_parms.length = sizeof(struct win_dev);
4403 iov[1].iov_base = buf;
4404 iov[1].iov_len = sizeof(struct win_dev);
4405 if (S_ISCHR(mode)) {
4406 memcpy(pdev->type, "IntxCHR", 8);
4407 pdev->major = cpu_to_le64(MAJOR(dev));
4408 pdev->minor = cpu_to_le64(MINOR(dev));
4409 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4410 &bytes_written, iov, 1);
4411 } else if (S_ISBLK(mode)) {
4412 memcpy(pdev->type, "IntxBLK", 8);
4413 pdev->major = cpu_to_le64(MAJOR(dev));
4414 pdev->minor = cpu_to_le64(MINOR(dev));
4415 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4416 &bytes_written, iov, 1);
4417 }
4418 tcon->ses->server->ops->close(xid, tcon, &fid);
4419 d_drop(dentry);
4420
4421 /* FIXME: add code here to set EAs */
4422out:
4423 kfree(buf);
4424 return rc;
4425}
4426
4427
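/*
 * Dialect dispatch tables.  Each smb_version_operations structure below maps
 * the protocol-independent cifs operations onto the SMB2.0/2.1/3.0/3.1.1
 * implementations above; the smb_version_values structures further down hold
 * the per-dialect constants.
 */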
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004428struct smb_version_operations smb20_operations = {
4429 .compare_fids = smb2_compare_fids,
4430 .setup_request = smb2_setup_request,
4431 .setup_async_request = smb2_setup_async_request,
4432 .check_receive = smb2_check_receive,
4433 .add_credits = smb2_add_credits,
4434 .set_credits = smb2_set_credits,
4435 .get_credits_field = smb2_get_credits_field,
4436 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004437 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004438 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004439 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004440 .read_data_offset = smb2_read_data_offset,
4441 .read_data_length = smb2_read_data_length,
4442 .map_error = map_smb2_to_linux_error,
4443 .find_mid = smb2_find_mid,
4444 .check_message = smb2_check_message,
4445 .dump_detail = smb2_dump_detail,
4446 .clear_stats = smb2_clear_stats,
4447 .print_stats = smb2_print_stats,
4448 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004449 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004450 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004451 .need_neg = smb2_need_neg,
4452 .negotiate = smb2_negotiate,
4453 .negotiate_wsize = smb2_negotiate_wsize,
4454 .negotiate_rsize = smb2_negotiate_rsize,
4455 .sess_setup = SMB2_sess_setup,
4456 .logoff = SMB2_logoff,
4457 .tree_connect = SMB2_tcon,
4458 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004459 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004460 .is_path_accessible = smb2_is_path_accessible,
4461 .can_echo = smb2_can_echo,
4462 .echo = SMB2_echo,
4463 .query_path_info = smb2_query_path_info,
4464 .get_srv_inum = smb2_get_srv_inum,
4465 .query_file_info = smb2_query_file_info,
4466 .set_path_size = smb2_set_path_size,
4467 .set_file_size = smb2_set_file_size,
4468 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004469 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004470 .mkdir = smb2_mkdir,
4471 .mkdir_setinfo = smb2_mkdir_setinfo,
4472 .rmdir = smb2_rmdir,
4473 .unlink = smb2_unlink,
4474 .rename = smb2_rename_path,
4475 .create_hardlink = smb2_create_hardlink,
4476 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01004477 .query_mf_symlink = smb3_query_mf_symlink,
4478 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004479 .open = smb2_open_file,
4480 .set_fid = smb2_set_fid,
4481 .close = smb2_close_file,
4482 .flush = smb2_flush_file,
4483 .async_readv = smb2_async_readv,
4484 .async_writev = smb2_async_writev,
4485 .sync_read = smb2_sync_read,
4486 .sync_write = smb2_sync_write,
4487 .query_dir_first = smb2_query_dir_first,
4488 .query_dir_next = smb2_query_dir_next,
4489 .close_dir = smb2_close_dir,
4490 .calc_smb_size = smb2_calc_size,
4491 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004492 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004493 .oplock_response = smb2_oplock_response,
4494 .queryfs = smb2_queryfs,
4495 .mand_lock = smb2_mand_lock,
4496 .mand_unlock_range = smb2_unlock_range,
4497 .push_mand_locks = smb2_push_mandatory_locks,
4498 .get_lease_key = smb2_get_lease_key,
4499 .set_lease_key = smb2_set_lease_key,
4500 .new_lease_key = smb2_new_lease_key,
4501 .calc_signature = smb2_calc_signature,
4502 .is_read_op = smb2_is_read_op,
4503 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004504 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004505 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004506 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004507 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004508 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004509 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304510 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004511#ifdef CONFIG_CIFS_XATTR
4512 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004513 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004514#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004515 .get_acl = get_smb2_acl,
4516 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004517 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004518 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004519 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004520 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004521 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004522 .llseek = smb3_llseek,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004523};
4524
Steve French1080ef72011-02-24 18:07:19 +00004525struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004526 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004527 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04004528 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004529 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04004530 .add_credits = smb2_add_credits,
4531 .set_credits = smb2_set_credits,
4532 .get_credits_field = smb2_get_credits_field,
4533 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004534 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004535 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004536 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004537 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004538 .read_data_offset = smb2_read_data_offset,
4539 .read_data_length = smb2_read_data_length,
4540 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004541 .find_mid = smb2_find_mid,
4542 .check_message = smb2_check_message,
4543 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04004544 .clear_stats = smb2_clear_stats,
4545 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004546 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004547 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004548 .downgrade_oplock = smb21_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04004549 .need_neg = smb2_need_neg,
4550 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07004551 .negotiate_wsize = smb2_negotiate_wsize,
4552 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04004553 .sess_setup = SMB2_sess_setup,
4554 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04004555 .tree_connect = SMB2_tcon,
4556 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004557 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04004558 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04004559 .can_echo = smb2_can_echo,
4560 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04004561 .query_path_info = smb2_query_path_info,
4562 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07004563 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004564 .set_path_size = smb2_set_path_size,
4565 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004566 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004567 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04004568 .mkdir = smb2_mkdir,
4569 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04004570 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07004571 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004572 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07004573 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004574 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004575 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004576 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07004577 .open = smb2_open_file,
4578 .set_fid = smb2_set_fid,
4579 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07004580 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004581 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07004582 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004583 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004584 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004585 .query_dir_first = smb2_query_dir_first,
4586 .query_dir_next = smb2_query_dir_next,
4587 .close_dir = smb2_close_dir,
4588 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07004589 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004590 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004591 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07004592 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07004593 .mand_lock = smb2_mand_lock,
4594 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07004595 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07004596 .get_lease_key = smb2_get_lease_key,
4597 .set_lease_key = smb2_set_lease_key,
4598 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06004599 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004600 .is_read_op = smb21_is_read_op,
4601 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004602 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004603 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004604 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004605 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004606 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05004607 .enum_snapshots = smb3_enum_snapshots,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004608 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304609 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004610#ifdef CONFIG_CIFS_XATTR
4611 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004612 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004613#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004614 .get_acl = get_smb2_acl,
4615 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004616 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004617 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004618 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004619 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004620 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004621 .llseek = smb3_llseek,
Steve French38107d42012-12-08 22:08:06 -06004622};
4623
Steve French38107d42012-12-08 22:08:06 -06004624struct smb_version_operations smb30_operations = {
4625 .compare_fids = smb2_compare_fids,
4626 .setup_request = smb2_setup_request,
4627 .setup_async_request = smb2_setup_async_request,
4628 .check_receive = smb2_check_receive,
4629 .add_credits = smb2_add_credits,
4630 .set_credits = smb2_set_credits,
4631 .get_credits_field = smb2_get_credits_field,
4632 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004633 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004634 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06004635 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004636 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06004637 .read_data_offset = smb2_read_data_offset,
4638 .read_data_length = smb2_read_data_length,
4639 .map_error = map_smb2_to_linux_error,
4640 .find_mid = smb2_find_mid,
4641 .check_message = smb2_check_message,
4642 .dump_detail = smb2_dump_detail,
4643 .clear_stats = smb2_clear_stats,
4644 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05004645 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06004646 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004647 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004648 .downgrade_oplock = smb21_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06004649 .need_neg = smb2_need_neg,
4650 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004651 .negotiate_wsize = smb3_negotiate_wsize,
4652 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06004653 .sess_setup = SMB2_sess_setup,
4654 .logoff = SMB2_logoff,
4655 .tree_connect = SMB2_tcon,
4656 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05004657 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06004658 .is_path_accessible = smb2_is_path_accessible,
4659 .can_echo = smb2_can_echo,
4660 .echo = SMB2_echo,
4661 .query_path_info = smb2_query_path_info,
4662 .get_srv_inum = smb2_get_srv_inum,
4663 .query_file_info = smb2_query_file_info,
4664 .set_path_size = smb2_set_path_size,
4665 .set_file_size = smb2_set_file_size,
4666 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004667 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06004668 .mkdir = smb2_mkdir,
4669 .mkdir_setinfo = smb2_mkdir_setinfo,
4670 .rmdir = smb2_rmdir,
4671 .unlink = smb2_unlink,
4672 .rename = smb2_rename_path,
4673 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004674 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004675 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004676 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06004677 .open = smb2_open_file,
4678 .set_fid = smb2_set_fid,
4679 .close = smb2_close_file,
4680 .flush = smb2_flush_file,
4681 .async_readv = smb2_async_readv,
4682 .async_writev = smb2_async_writev,
4683 .sync_read = smb2_sync_read,
4684 .sync_write = smb2_sync_write,
4685 .query_dir_first = smb2_query_dir_first,
4686 .query_dir_next = smb2_query_dir_next,
4687 .close_dir = smb2_close_dir,
4688 .calc_smb_size = smb2_calc_size,
4689 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004690 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06004691 .oplock_response = smb2_oplock_response,
4692 .queryfs = smb2_queryfs,
4693 .mand_lock = smb2_mand_lock,
4694 .mand_unlock_range = smb2_unlock_range,
4695 .push_mand_locks = smb2_push_mandatory_locks,
4696 .get_lease_key = smb2_get_lease_key,
4697 .set_lease_key = smb2_set_lease_key,
4698 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004699 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06004700 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004701 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004702 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004703 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004704 .create_lease_buf = smb3_create_lease_buf,
4705 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004706 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05004707 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06004708 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004709 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004710 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05004711 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004712 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004713 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004714 .is_transform_hdr = smb3_is_transform_hdr,
4715 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004716 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304717 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004718#ifdef CONFIG_CIFS_XATTR
4719 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004720 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004721#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004722 .get_acl = get_smb2_acl,
4723 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004724 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004725 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004726 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004727 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004728 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004729 .llseek = smb3_llseek,
Steve French1080ef72011-02-24 18:07:19 +00004730};
4731
Steve Frenchaab18932015-06-23 23:37:11 -05004732struct smb_version_operations smb311_operations = {
4733 .compare_fids = smb2_compare_fids,
4734 .setup_request = smb2_setup_request,
4735 .setup_async_request = smb2_setup_async_request,
4736 .check_receive = smb2_check_receive,
4737 .add_credits = smb2_add_credits,
4738 .set_credits = smb2_set_credits,
4739 .get_credits_field = smb2_get_credits_field,
4740 .get_credits = smb2_get_credits,
4741 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004742 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05004743 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004744 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05004745 .read_data_offset = smb2_read_data_offset,
4746 .read_data_length = smb2_read_data_length,
4747 .map_error = map_smb2_to_linux_error,
4748 .find_mid = smb2_find_mid,
4749 .check_message = smb2_check_message,
4750 .dump_detail = smb2_dump_detail,
4751 .clear_stats = smb2_clear_stats,
4752 .print_stats = smb2_print_stats,
4753 .dump_share_caps = smb2_dump_share_caps,
4754 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004755 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004756 .downgrade_oplock = smb21_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05004757 .need_neg = smb2_need_neg,
4758 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004759 .negotiate_wsize = smb3_negotiate_wsize,
4760 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05004761 .sess_setup = SMB2_sess_setup,
4762 .logoff = SMB2_logoff,
4763 .tree_connect = SMB2_tcon,
4764 .tree_disconnect = SMB2_tdis,
4765 .qfs_tcon = smb3_qfs_tcon,
4766 .is_path_accessible = smb2_is_path_accessible,
4767 .can_echo = smb2_can_echo,
4768 .echo = SMB2_echo,
4769 .query_path_info = smb2_query_path_info,
4770 .get_srv_inum = smb2_get_srv_inum,
4771 .query_file_info = smb2_query_file_info,
4772 .set_path_size = smb2_set_path_size,
4773 .set_file_size = smb2_set_file_size,
4774 .set_file_info = smb2_set_file_info,
4775 .set_compression = smb2_set_compression,
4776 .mkdir = smb2_mkdir,
4777 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05004778 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05004779 .rmdir = smb2_rmdir,
4780 .unlink = smb2_unlink,
4781 .rename = smb2_rename_path,
4782 .create_hardlink = smb2_create_hardlink,
4783 .query_symlink = smb2_query_symlink,
4784 .query_mf_symlink = smb3_query_mf_symlink,
4785 .create_mf_symlink = smb3_create_mf_symlink,
4786 .open = smb2_open_file,
4787 .set_fid = smb2_set_fid,
4788 .close = smb2_close_file,
4789 .flush = smb2_flush_file,
4790 .async_readv = smb2_async_readv,
4791 .async_writev = smb2_async_writev,
4792 .sync_read = smb2_sync_read,
4793 .sync_write = smb2_sync_write,
4794 .query_dir_first = smb2_query_dir_first,
4795 .query_dir_next = smb2_query_dir_next,
4796 .close_dir = smb2_close_dir,
4797 .calc_smb_size = smb2_calc_size,
4798 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004799 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05004800 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05004801 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05004802 .mand_lock = smb2_mand_lock,
4803 .mand_unlock_range = smb2_unlock_range,
4804 .push_mand_locks = smb2_push_mandatory_locks,
4805 .get_lease_key = smb2_get_lease_key,
4806 .set_lease_key = smb2_set_lease_key,
4807 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004808 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05004809 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004810 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05004811 .is_read_op = smb21_is_read_op,
4812 .set_oplock_level = smb3_set_oplock_level,
4813 .create_lease_buf = smb3_create_lease_buf,
4814 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004815 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07004816 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05004817/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
4818 .wp_retry_size = smb2_wp_retry_size,
4819 .dir_needs_close = smb2_dir_needs_close,
4820 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004821 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004822 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004823 .is_transform_hdr = smb3_is_transform_hdr,
4824 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004825 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304826 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004827#ifdef CONFIG_CIFS_XATTR
4828 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004829 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004830#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10004831 .get_acl = get_smb2_acl,
4832 .get_acl_by_fid = get_smb2_acl_by_fid,
4833 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004834 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004835 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004836 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004837 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004838 .llseek = smb3_llseek,
Steve Frenchaab18932015-06-23 23:37:11 -05004839};
Steve Frenchaab18932015-06-23 23:37:11 -05004840
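/*
 * Per-dialect constants: protocol identifiers, header and maximum header
 * sizes, lock flag encodings, signing flags and the size of the create
 * lease context used by that dialect.
 */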
Steve Frenchdd446b12012-11-28 23:21:06 -06004841struct smb_version_values smb20_values = {
4842 .version_string = SMB20_VERSION_STRING,
4843 .protocol_id = SMB20_PROT_ID,
4844 .req_capabilities = 0, /* MBZ */
4845 .large_lock_type = 0,
4846 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4847 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4848 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004849 .header_size = sizeof(struct smb2_sync_hdr),
4850 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06004851 .max_header_size = MAX_SMB2_HDR_SIZE,
4852 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4853 .lock_cmd = SMB2_LOCK,
4854 .cap_unix = 0,
4855 .cap_nt_find = SMB2_NT_FIND,
4856 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004857 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4858 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004859 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06004860};
4861
Steve French1080ef72011-02-24 18:07:19 +00004862struct smb_version_values smb21_values = {
4863 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004864 .protocol_id = SMB21_PROT_ID,
4865 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
4866 .large_lock_type = 0,
4867 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4868 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4869 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004870 .header_size = sizeof(struct smb2_sync_hdr),
4871 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004872 .max_header_size = MAX_SMB2_HDR_SIZE,
4873 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4874 .lock_cmd = SMB2_LOCK,
4875 .cap_unix = 0,
4876 .cap_nt_find = SMB2_NT_FIND,
4877 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004878 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4879 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004880 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05004881};
4882
Steve French9764c022017-09-17 10:41:35 -05004883struct smb_version_values smb3any_values = {
4884 .version_string = SMB3ANY_VERSION_STRING,
4885 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004886 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004887 .large_lock_type = 0,
4888 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4889 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4890 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004891 .header_size = sizeof(struct smb2_sync_hdr),
4892 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004893 .max_header_size = MAX_SMB2_HDR_SIZE,
4894 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4895 .lock_cmd = SMB2_LOCK,
4896 .cap_unix = 0,
4897 .cap_nt_find = SMB2_NT_FIND,
4898 .cap_large_files = SMB2_LARGE_FILES,
4899 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4900 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4901 .create_lease_size = sizeof(struct create_lease_v2),
4902};
4903
4904struct smb_version_values smbdefault_values = {
4905 .version_string = SMBDEFAULT_VERSION_STRING,
4906 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004907 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004908 .large_lock_type = 0,
4909 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4910 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4911 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004912 .header_size = sizeof(struct smb2_sync_hdr),
4913 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004914 .max_header_size = MAX_SMB2_HDR_SIZE,
4915 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4916 .lock_cmd = SMB2_LOCK,
4917 .cap_unix = 0,
4918 .cap_nt_find = SMB2_NT_FIND,
4919 .cap_large_files = SMB2_LARGE_FILES,
4920 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4921 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4922 .create_lease_size = sizeof(struct create_lease_v2),
4923};
4924
Steve Frenche4aa25e2012-10-01 12:26:22 -05004925struct smb_version_values smb30_values = {
4926 .version_string = SMB30_VERSION_STRING,
4927 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004928 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004929 .large_lock_type = 0,
4930 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4931 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4932 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004933 .header_size = sizeof(struct smb2_sync_hdr),
4934 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004935 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004936 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004937 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004938 .cap_unix = 0,
4939 .cap_nt_find = SMB2_NT_FIND,
4940 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004941 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4942 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004943 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00004944};
Steve French20b6d8b2013-06-12 22:48:41 -05004945
4946struct smb_version_values smb302_values = {
4947 .version_string = SMB302_VERSION_STRING,
4948 .protocol_id = SMB302_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004949 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French20b6d8b2013-06-12 22:48:41 -05004950 .large_lock_type = 0,
4951 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4952 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4953 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004954 .header_size = sizeof(struct smb2_sync_hdr),
4955 .header_preamble_size = 0,
Steve French20b6d8b2013-06-12 22:48:41 -05004956 .max_header_size = MAX_SMB2_HDR_SIZE,
4957 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4958 .lock_cmd = SMB2_LOCK,
4959 .cap_unix = 0,
4960 .cap_nt_find = SMB2_NT_FIND,
4961 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004962 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4963 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004964 .create_lease_size = sizeof(struct create_lease_v2),
Steve French20b6d8b2013-06-12 22:48:41 -05004965};
Steve French5f7fbf72014-12-17 22:52:58 -06004966
Steve French5f7fbf72014-12-17 22:52:58 -06004967struct smb_version_values smb311_values = {
4968 .version_string = SMB311_VERSION_STRING,
4969 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004970 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06004971 .large_lock_type = 0,
4972 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4973 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4974 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004975 .header_size = sizeof(struct smb2_sync_hdr),
4976 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06004977 .max_header_size = MAX_SMB2_HDR_SIZE,
4978 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4979 .lock_cmd = SMB2_LOCK,
4980 .cap_unix = 0,
4981 .cap_nt_find = SMB2_NT_FIND,
4982 .cap_large_files = SMB2_LARGE_FILES,
4983 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4984 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4985 .create_lease_size = sizeof(struct create_lease_v2),
4986};