blob: a5c96bc522cb3e3afaf2f8e582aa586f5325cfc0 [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Aurelien Aptel35adffe2019-09-20 06:29:39 +020013#include <linux/sort.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070014#include <crypto/aead.h>
Steve French1080ef72011-02-24 18:07:19 +000015#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040016#include "smb2pdu.h"
17#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040018#include "cifsproto.h"
19#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040020#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070021#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070022#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050023#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070024#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040025
Pavel Shilovskyef68e832019-01-18 17:25:36 -080026/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040027static int
28change_conf(struct TCP_Server_Info *server)
29{
30 server->credits += server->echo_credits + server->oplock_credits;
31 server->oplock_credits = server->echo_credits = 0;
32 switch (server->credits) {
33 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080034 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040035 case 1:
36 server->echoes = false;
37 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040038 break;
39 case 2:
40 server->echoes = true;
41 server->oplocks = false;
42 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040043 break;
44 default:
45 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050046 if (enable_oplocks) {
47 server->oplocks = true;
48 server->oplock_credits = 1;
49 } else
50 server->oplocks = false;
51
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040052 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040053 }
54 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080055 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040056}
57
/*
 * Return credits granted in a response to the server's pool for the
 * given operation type, then rebalance/limit the pool and wake waiters.
 *
 * @credits->value is the number of credits to put back;
 * @credits->instance identifies which reconnect generation they belong
 * to - credits from an older instance are dropped (logged only).
 */
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;	/* rc == -1 means change_conf() was not run */
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	/* instance 0 means "any instance"; otherwise it must match current */
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	/* logging is done outside the spinlock on purpose */
	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			 add, instance);

	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	/* rc is the total credit count returned by change_conf(), if run */
	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}
125
126static void
127smb2_set_credits(struct TCP_Server_Info *server, const int val)
128{
129 spin_lock(&server->req_lock);
130 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500131 if (val == 1)
132 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400133 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500134 /* don't log while holding the lock */
135 if (val == 1)
136 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400137}
138
139static int *
140smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
141{
142 switch (optype) {
143 case CIFS_ECHO_OP:
144 return &server->echo_credits;
145 case CIFS_OBREAK_OP:
146 return &server->oplock_credits;
147 default:
148 return &server->credits;
149 }
150}
151
/* Number of credits the server granted on this mid's response */
static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	return mid->credits_received;
}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400157
/*
 * Block until credits are available for a large (multi-credit) request,
 * then reserve enough of them for up to @size bytes of payload.
 *
 * On success *num is the payload size actually allowed and @credits
 * records how many credits were taken (and from which reconnect
 * instance). When the pool is nearly empty (<= 8 credits) we take
 * nothing and allow a single-credit sized request instead, to keep
 * credits free for reopen. Returns 0, a signal-related error from
 * wait_event_killable(), or -ENOENT if the connection is being torn down.
 */
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			/* drop the lock while sleeping for credits */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			if (server->in_flight > server->max_in_flight)
				server->max_in_flight = server->in_flight;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}
209
/*
 * Shrink a previously reserved credit allotment to match the actual
 * payload size, returning the surplus to the server's pool.
 *
 * Returns 0 on success (or when nothing was reserved / nothing changes),
 * -ENOTSUPP if fewer credits were reserved than the payload needs
 * (WARNs once - callers are expected to reserve enough up front), or
 * -EAGAIN if a reconnect happened since the reservation, in which case
 * the stale credits must not be returned to the new session.
 */
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		WARN_ONCE(1, "request has less credits (%d) than required (%d)",
			  credits->value, new_val);
		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		spin_unlock(&server->req_lock);
		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
			 credits->value - new_val);
		return -EAGAIN;
	}

	/* give the surplus back and wake anyone waiting on credits */
	server->credits += credits->value - new_val;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	credits->value = new_val;
	return 0;
}
241
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400242static __u64
243smb2_get_next_mid(struct TCP_Server_Info *server)
244{
245 __u64 mid;
246 /* for SMB2 we need the current value */
247 spin_lock(&GlobalMid_Lock);
248 mid = server->CurrentMid++;
249 spin_unlock(&GlobalMid_Lock);
250 return mid;
251}
Steve French1080ef72011-02-24 18:07:19 +0000252
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800253static void
254smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
255{
256 spin_lock(&GlobalMid_Lock);
257 if (server->CurrentMid >= val)
258 server->CurrentMid -= val;
259 spin_unlock(&GlobalMid_Lock);
260}
261
/*
 * Find the pending mid_q_entry matching the message id and command of a
 * received SMB2 frame, taking a reference on it for the caller.
 * Returns NULL if no match is found or if the frame is encrypted
 * (transform header), which this path does not handle.
 */
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			/* caller drops this reference when done with the mid */
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}
287
/*
 * Dump the SMB2 header fields and computed length of a buffer for
 * debugging. Compiles to a no-op unless CONFIG_CIFS_DEBUG2 is set.
 */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}
301
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400302static bool
303smb2_need_neg(struct TCP_Server_Info *server)
304{
305 return server->max_read == 0;
306}
307
/*
 * Perform SMB2 protocol negotiation for this session, resetting the
 * message id counter first. -EAGAIN from the transport is converted to
 * -EHOSTDOWN so callers don't retry the negotiate.
 */
static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;

	cifs_ses_server(ses)->CurrentMid = 0;
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}
320
/*
 * Compute the write size for SMB2: the user-requested (or default)
 * wsize, clamped by the server's negotiated max_write, the RDMA
 * connection limits when SMB Direct is in use, and a single-credit
 * buffer size if the server lacks large-MTU support.
 */
static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use RDMA read/write directly */
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
345
/*
 * SMB3 variant of the wsize computation: identical to the SMB2 one
 * except it starts from the larger SMB3_DEFAULT_IOSIZE default.
 */
static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use RDMA read/write directly */
		if (server->sign)
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_fragmented_send_size);
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
370
/*
 * Compute the read size for SMB2: the user-requested (or default)
 * rsize, clamped by the server's negotiated max_read, the RDMA
 * connection limits when SMB Direct is in use, and a single-credit
 * buffer size if the server lacks large-MTU support.
 */
static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use RDMA read/write directly */
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
396
/*
 * SMB3 variant of the rsize computation: identical to the SMB2 one
 * except it starts from the larger SMB3_DEFAULT_IOSIZE default.
 */
static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		/* signed frames cannot use RDMA read/write directly */
		if (server->sign)
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_fragmented_recv_size);
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200422
/*
 * Parse the FSCTL_QUERY_NETWORK_INTERFACE_INFO response in @buf
 * (@buf_len bytes) into a freshly kcalloc'ed array of
 * cifs_server_iface, returned through @iface_list/@iface_count.
 *
 * Ownership of *iface_list transfers to the caller on success (rc 0);
 * on error it is freed here and *iface_list is NULL, *iface_count 0.
 * Unsupported address families are skipped, not counted.
 */
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
			size_t buf_len,
			struct cifs_server_iface **iface_list,
			size_t *iface_count)
{
	struct network_interface_info_ioctl_rsp *p;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	struct iface_info_ipv4 *p4;
	struct iface_info_ipv6 *p6;
	struct cifs_server_iface *info;
	ssize_t bytes_left;
	size_t next = 0;
	int nb_iface = 0;
	int rc = 0;

	*iface_list = NULL;
	*iface_count = 0;

	/*
	 * First pass: count and sanity check
	 */

	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		nb_iface++;
		/* Next is the byte offset to the following entry; 0 ends list */
		next = le32_to_cpu(p->Next);
		if (!next) {
			bytes_left -= sizeof(*p);
			break;
		}
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!nb_iface) {
		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	if (bytes_left || p->Next)
		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);


	/*
	 * Second pass: extract info to internal structure
	 */

	*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
	if (!*iface_list) {
		rc = -ENOMEM;
		goto out;
	}

	info = *iface_list;
	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		info->speed = le64_to_cpu(p->LinkSpeed);
		/*
		 * NOTE(review): the mask is applied to the little-endian
		 * value before conversion (p->Capability & FLAG inside
		 * le32_to_cpu) - verify this is intended on big-endian
		 * hosts; le32_to_cpu(p->Capability) & FLAG looks safer.
		 */
		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);

		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
			 le32_to_cpu(p->Capability));

		switch (p->Family) {
		/*
		 * The kernel and wire socket structures have the same
		 * layout and use network byte order but make the
		 * conversion explicit in case either one changes.
		 */
		case INTERNETWORK:
			addr4 = (struct sockaddr_in *)&info->sockaddr;
			p4 = (struct iface_info_ipv4 *)p->Buffer;
			addr4->sin_family = AF_INET;
			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
			addr4->sin_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
				 &addr4->sin_addr);
			break;
		case INTERNETWORKV6:
			addr6 = (struct sockaddr_in6 *)&info->sockaddr;
			p6 = (struct iface_info_ipv6 *)p->Buffer;
			addr6->sin6_family = AF_INET6;
			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
			addr6->sin6_flowinfo = 0;
			addr6->sin6_scope_id = 0;
			addr6->sin6_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
				 &addr6->sin6_addr);
			break;
		default:
			cifs_dbg(VFS,
				 "%s: skipping unsupported socket family\n",
				 __func__);
			goto next_iface;
		}

		(*iface_count)++;
		info++;
next_iface:
		next = le32_to_cpu(p->Next);
		if (!next)
			break;
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	/* every entry was an unsupported family: treat as invalid */
	if (!*iface_count) {
		rc = -EINVAL;
		goto out;
	}

out:
	if (rc) {
		kfree(*iface_list);
		*iface_count = 0;
		*iface_list = NULL;
	}
	return rc;
}
555
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200556static int compare_iface(const void *ia, const void *ib)
557{
558 const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
559 const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
560
561 return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
562}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200563
/*
 * Query the server's network interfaces via
 * FSCTL_QUERY_NETWORK_INTERFACE_INFO, parse and sort them (fastest
 * first), and swap the new list into the session under iface_lock.
 * -EOPNOTSUPP from the server is logged at FYI level and returned.
 */
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	/* sort interfaces from fastest to slowest */
	sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);

	/* publish the new list; the old one is freed under the lock */
	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500606
/*
 * kref release callback for a cached fid: closes the cached root handle
 * on the server and marks the cache entry (and its stashed file info)
 * invalid. Runs with the owning fid_mutex held by the kref_put caller.
 */
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
	}
}
621
/*
 * Drop one reference on the cached share-root handle; the last
 * reference triggers smb2_close_cached_fid() under fid_mutex.
 */
void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}
628
/*
 * Work handler run when the server breaks the lease on the cached
 * root handle: drop the cache's reference so the handle gets closed.
 */
void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, lease_break);

	close_shroot(cfid);
}
637
/*
 * Open the directory at the root of a share
 *
 * Returns the cached root handle in *pfid if one exists (taking a
 * reference), otherwise opens it with a compound open+query_info
 * request and caches it in tcon->crfid. The open is sent without
 * fid_mutex held (see comment below); a racing opener is detected
 * after the fact and the extra handle is closed.
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		/* fast path: hand out the existing cached handle */
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = 0;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* compound a query_info on the fid from the preceding open */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	/* publish the new handle in the per-tcon cache */
	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/* extra ref held by the cache itself while leased */
		kref_get(&tcon->crfid.refcount);
		smb2_parse_contexts(server, o_rsp,
				&oparms.fid->epoch,
				    oparms.fid->lease_key, &oplock, NULL);
	} else
		goto oshr_exit;

	/* stash the FILE_ALL_INFORMATION data from the compound reply */
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = 1;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
801
/*
 * On tree connect for SMB3: open the share root (cached handle unless
 * nohandlecache), query the server's network interfaces, and issue a
 * series of QFS attribute queries (attribute/device/volume/sector-size
 * info) whose results are logged/cached by the lower layers.
 */
static void
smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	bool no_cached_open = tcon->nohandlecache;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (no_cached_open)
		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
			       NULL);
	else
		rc = open_shroot(xid, tcon, &fid);

	if (rc)
		return;

	SMB3_request_interfaces(xid, tcon);

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_VOLUME_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
	/* matching close/put for whichever open path was taken above */
	if (no_cached_open)
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	else
		close_shroot(&tcon->crfid);
}
843
/*
 * SMB2 (pre-SMB3) variant of the per-tcon filesystem-info query: open the
 * root of the share, query the attribute and device info levels, and close
 * the handle.  Unlike smb3_qfs_tcon() there is no interface query, no
 * sector-size/volume query, and no cached-handle path.
 */
static void
smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
	if (rc)
		return;

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
870
/*
 * Check whether @full_path exists and is accessible by attempting an
 * open (FILE_READ_ATTRIBUTES) followed by an immediate close.
 *
 * Returns 0 if the path could be opened, -ENOMEM on allocation failure,
 * or the error from SMB2_open()/SMB2_close() otherwise.
 */
static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, const char *full_path)
{
	int rc;
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	/* Root of share: a valid cached root handle proves accessibility */
	if ((*full_path == 0) && tcon->crfid.is_valid)
		return 0;

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	/* honor the "backupuid/backupgid" mount options */
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	if (rc) {
		kfree(utf16_path);
		return rc;
	}

	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	kfree(utf16_path);
	return rc;
}
908
/*
 * Return the server-assigned unique file id for @full_path.  The caller
 * already fetched FILE_ALL_INFO into @data, so this just extracts the
 * IndexNumber field - no network round trip (xid/tcon/cifs_sb unused,
 * kept for signature compatibility with the ops table).  Always returns 0.
 */
static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}
917
/*
 * Query all file information for an open handle and convert the SMB2
 * response into the caller's FILE_ALL_INFO buffer.
 *
 * Returns 0 on success, -ENOMEM if the temporary response buffer cannot
 * be allocated, or the error from SMB2_query_info().
 */
static int
smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
		     struct cifs_fid *fid, FILE_ALL_INFO *data)
{
	int rc;
	struct smb2_file_all_info *smb2_data;

	/* extra PATH_MAX * 2 bytes allow for the variable-length name part */
	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			    GFP_KERNEL);
	if (smb2_data == NULL)
		return -ENOMEM;

	rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
			     smb2_data);
	if (!rc)
		move_smb2_info_to_cifs(data, smb2_data);
	kfree(smb2_data);
	return rc;
}
937
Arnd Bergmann1368f152017-09-05 11:24:15 +0200938#ifdef CONFIG_CIFS_XATTR
/*
 * Walk a wire-format FILE_FULL_EA_INFORMATION list (@src, @src_size bytes)
 * and either:
 *  - @ea_name != NULL: copy that EA's value into @dst (getxattr), or
 *  - @ea_name == NULL: emit "user."-prefixed, NUL-terminated names into
 *    @dst (listxattr).
 *
 * In both modes a zero @dst_size means "size query": return the needed
 * length without copying.  Returns the byte count on success, -ERANGE if
 * @dst is too small or the list is malformed, -EIO if an entry overruns
 * the list, -ENODATA if a named EA was not found.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;	/* remembers original size for list mode */
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		/* value follows the name and its NUL terminator */
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		/* 8 = fixed header: next_entry_offset/flags/name len/value len */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				/* size query only */
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* zero next_entry_offset marks the final entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1021
/*
 * Fetch the extended attributes of @path via a compound
 * open/query-info/close and convert them into the getxattr/listxattr
 * format expected by the VFS (see move_smb2_ea_to_cifs()).
 *
 * @ea_name: specific EA to fetch, or NULL to list all EA names.
 * Returns byte count on success or a negative errno; an empty EA list
 * with @ea_name == NULL is success (0), not -ENODATA.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* output cap leaves room for the create/close parts of the compound */
	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	/* make sure the advertised EA blob actually fits in the response */
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
			le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1076
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001077
1078static int
1079smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1080 const char *path, const char *ea_name, const void *ea_value,
1081 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1082 struct cifs_sb_info *cifs_sb)
1083{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001084 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001085 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001086 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001087 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001088 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001089 struct smb_rqst rqst[3];
1090 int resp_buftype[3];
1091 struct kvec rsp_iov[3];
1092 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1093 struct cifs_open_parms oparms;
1094 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1095 struct cifs_fid fid;
1096 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1097 unsigned int size[1];
1098 void *data[1];
1099 struct smb2_file_full_ea_info *ea = NULL;
1100 struct kvec close_iov[1];
1101 int rc;
1102
1103 if (smb3_encryption_required(tcon))
1104 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001105
1106 if (ea_name_len > 255)
1107 return -EINVAL;
1108
1109 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1110 if (!utf16_path)
1111 return -ENOMEM;
1112
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001113 memset(rqst, 0, sizeof(rqst));
1114 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1115 memset(rsp_iov, 0, sizeof(rsp_iov));
1116
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001117 if (ses->server->ops->query_all_EAs) {
1118 if (!ea_value) {
1119 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1120 ea_name, NULL, 0,
1121 cifs_sb);
1122 if (rc == -ENODATA)
1123 goto sea_exit;
1124 }
1125 }
1126
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001127 /* Open */
1128 memset(&open_iov, 0, sizeof(open_iov));
1129 rqst[0].rq_iov = open_iov;
1130 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1131
1132 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001133 oparms.tcon = tcon;
1134 oparms.desired_access = FILE_WRITE_EA;
1135 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05001136 if (backup_cred(cifs_sb))
1137 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1138 else
1139 oparms.create_options = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001140 oparms.fid = &fid;
1141 oparms.reconnect = false;
1142
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001143 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
1144 if (rc)
1145 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001146 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001147
1148
1149 /* Set Info */
1150 memset(&si_iov, 0, sizeof(si_iov));
1151 rqst[1].rq_iov = si_iov;
1152 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001153
1154 len = sizeof(ea) + ea_name_len + ea_value_len + 1;
1155 ea = kzalloc(len, GFP_KERNEL);
1156 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001157 rc = -ENOMEM;
1158 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001159 }
1160
1161 ea->ea_name_length = ea_name_len;
1162 ea->ea_value_length = cpu_to_le16(ea_value_len);
1163 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1164 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1165
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001166 size[0] = len;
1167 data[0] = ea;
1168
1169 rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
1170 COMPOUND_FID, current->tgid,
1171 FILE_FULL_EA_INFORMATION,
1172 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001173 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001174 smb2_set_related(&rqst[1]);
1175
1176
1177 /* Close */
1178 memset(&close_iov, 0, sizeof(close_iov));
1179 rqst[2].rq_iov = close_iov;
1180 rqst[2].rq_nvec = 1;
Steve French43f8a6a2019-12-02 21:46:54 -06001181 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001182 smb2_set_related(&rqst[2]);
1183
1184 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1185 resp_buftype, rsp_iov);
Steve Frenchd2f15422019-09-22 00:55:46 -05001186 /* no need to bump num_remote_opens because handle immediately closed */
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001187
1188 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001189 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001190 kfree(utf16_path);
1191 SMB2_open_free(&rqst[0]);
1192 SMB2_set_info_free(&rqst[1]);
1193 SMB2_close_free(&rqst[2]);
1194 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1195 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1196 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001197 return rc;
1198}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001199#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001200
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001201static bool
1202smb2_can_echo(struct TCP_Server_Info *server)
1203{
1204 return server->echoes;
1205}
1206
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001207static void
1208smb2_clear_stats(struct cifs_tcon *tcon)
1209{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001210 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001211
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001212 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1213 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1214 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1215 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001216}
1217
/*
 * Dump the negotiated share capabilities, sector-size flags, and related
 * tcon fields to a seq_file (used by /proc/fs/cifs debug output).
 */
static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	/* ss_flags come from the FS_SECTOR_SIZE_INFORMATION query (SMB3) */
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}
1250
/*
 * Print this tcon's per-command request/failure statistics and byte/open
 * totals to a seq_file (used by /proc/fs/cifs/Stats).
 */
static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;

	/*
	 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
	 * totals (requests sent) since those SMBs are per-session not per tcon
	 */
	seq_printf(m, "\nBytes read: %llu  Bytes written: %llu",
		   (long long)(tcon->bytes_read),
		   (long long)(tcon->bytes_written));
	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
		   atomic_read(&tcon->num_local_opens),
		   atomic_read(&tcon->num_remote_opens));
	seq_printf(m, "\nTreeConnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
	seq_printf(m, "\nCreates: %d total %d failed",
		   atomic_read(&sent[SMB2_CREATE_HE]),
		   atomic_read(&failed[SMB2_CREATE_HE]));
	seq_printf(m, "\nCloses: %d total %d failed",
		   atomic_read(&sent[SMB2_CLOSE_HE]),
		   atomic_read(&failed[SMB2_CLOSE_HE]));
	seq_printf(m, "\nFlushes: %d total %d failed",
		   atomic_read(&sent[SMB2_FLUSH_HE]),
		   atomic_read(&failed[SMB2_FLUSH_HE]));
	seq_printf(m, "\nReads: %d total %d failed",
		   atomic_read(&sent[SMB2_READ_HE]),
		   atomic_read(&failed[SMB2_READ_HE]));
	seq_printf(m, "\nWrites: %d total %d failed",
		   atomic_read(&sent[SMB2_WRITE_HE]),
		   atomic_read(&failed[SMB2_WRITE_HE]));
	seq_printf(m, "\nLocks: %d total %d failed",
		   atomic_read(&sent[SMB2_LOCK_HE]),
		   atomic_read(&failed[SMB2_LOCK_HE]));
	seq_printf(m, "\nIOCTLs: %d total %d failed",
		   atomic_read(&sent[SMB2_IOCTL_HE]),
		   atomic_read(&failed[SMB2_IOCTL_HE]));
	seq_printf(m, "\nQueryDirectories: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
	seq_printf(m, "\nChangeNotifies: %d total %d failed",
		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
	seq_printf(m, "\nQueryInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
	seq_printf(m, "\nSetInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_SET_INFO_HE]),
		   atomic_read(&failed[SMB2_SET_INFO_HE]));
	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
}
1310
/*
 * Record the server-assigned file ids (and lease/oplock state) from a
 * completed open into the cifsFileInfo, and update the inode's caching
 * level accordingly.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	/* may set fid->purge_cache as a side effect */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1327
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001328static void
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001329smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1330 struct cifs_fid *fid)
1331{
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001332 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001333}
1334
/*
 * Close an open handle and use the attributes the server returns in the
 * close response to refresh the cached inode timestamps and block count,
 * saving a separate query-info round trip.  On close failure the inode is
 * left untouched.
 */
static void
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
		   cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);

	/*
	 * i_blocks is not related to (i_size / i_blksize),
	 * but instead 512 byte (2**9) size is required for
	 * calculating num blocks.
	 */
	/*
	 * NOTE(review): the "> 4096" guard means small files keep a stale
	 * i_blocks - presumably intentional to avoid churn on tiny files,
	 * but worth confirming against the stat(2) results users expect.
	 */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
}
1373
/*
 * Ask the server for a resume key (FSCTL_SRV_REQUEST_RESUME_KEY) on the
 * given open handle and copy it into @pcchunk->SourceKey for a subsequent
 * server-side copychunk operation.
 *
 * Returns 0 on success, -EINVAL if the server's reply is too short, or
 * the error from SMB2_ioctl().
 */
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		/*
		 * NOTE(review): kfree(res_key) below relies on SMB2_ioctl()
		 * NULLing *out_data on entry before any failure - verify in
		 * smb2pdu.c, since res_key is not initialized here.
		 */
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}
1403
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001404static int
1405smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001406 struct cifs_tcon *tcon,
1407 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001408 unsigned long p)
1409{
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001410 struct cifs_ses *ses = tcon->ses;
1411 char __user *arg = (char __user *)p;
1412 struct smb_query_info qi;
1413 struct smb_query_info __user *pqi;
1414 int rc = 0;
1415 int flags = 0;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001416 struct smb2_query_info_rsp *qi_rsp = NULL;
1417 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001418 void *buffer = NULL;
1419 struct smb_rqst rqst[3];
1420 int resp_buftype[3];
1421 struct kvec rsp_iov[3];
1422 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1423 struct cifs_open_parms oparms;
1424 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1425 struct cifs_fid fid;
1426 struct kvec qi_iov[1];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001427 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001428 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001429 struct kvec close_iov[1];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001430 unsigned int size[2];
1431 void *data[2];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001432
1433 memset(rqst, 0, sizeof(rqst));
1434 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1435 memset(rsp_iov, 0, sizeof(rsp_iov));
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001436
1437 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
1438 return -EFAULT;
1439
1440 if (qi.output_buffer_length > 1024)
1441 return -EINVAL;
1442
1443 if (!ses || !(ses->server))
1444 return -EIO;
1445
1446 if (smb3_encryption_required(tcon))
1447 flags |= CIFS_TRANSFORM_REQ;
1448
Markus Elfringcfaa1182019-11-05 21:30:25 +01001449 buffer = memdup_user(arg + sizeof(struct smb_query_info),
1450 qi.output_buffer_length);
1451 if (IS_ERR(buffer))
1452 return PTR_ERR(buffer);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001453
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001454 /* Open */
1455 memset(&open_iov, 0, sizeof(open_iov));
1456 rqst[0].rq_iov = open_iov;
1457 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001458
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001459 memset(&oparms, 0, sizeof(oparms));
1460 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001461 oparms.disposition = FILE_OPEN;
1462 if (is_dir)
1463 oparms.create_options = CREATE_NOT_FILE;
1464 else
1465 oparms.create_options = CREATE_NOT_DIR;
1466 oparms.fid = &fid;
1467 oparms.reconnect = false;
1468
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001469 if (qi.flags & PASSTHRU_FSCTL) {
1470 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1471 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1472 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001473 break;
1474 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1475 oparms.desired_access = GENERIC_ALL;
1476 break;
1477 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1478 oparms.desired_access = GENERIC_READ;
1479 break;
1480 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1481 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001482 break;
1483 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001484 } else if (qi.flags & PASSTHRU_SET_INFO) {
1485 oparms.desired_access = GENERIC_WRITE;
1486 } else {
1487 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001488 }
1489
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001490 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
1491 if (rc)
1492 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001493 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001494
1495 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001496 if (qi.flags & PASSTHRU_FSCTL) {
1497 /* Can eventually relax perm check since server enforces too */
1498 if (!capable(CAP_SYS_ADMIN))
1499 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001500 else {
1501 memset(&io_iov, 0, sizeof(io_iov));
1502 rqst[1].rq_iov = io_iov;
1503 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1504
1505 rc = SMB2_ioctl_init(tcon, &rqst[1],
1506 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001507 qi.info_type, true, buffer,
1508 qi.output_buffer_length,
1509 CIFSMaxBufSize);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001510 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001511 } else if (qi.flags == PASSTHRU_SET_INFO) {
1512 /* Can eventually relax perm check since server enforces too */
1513 if (!capable(CAP_SYS_ADMIN))
1514 rc = -EPERM;
1515 else {
1516 memset(&si_iov, 0, sizeof(si_iov));
1517 rqst[1].rq_iov = si_iov;
1518 rqst[1].rq_nvec = 1;
1519
1520 size[0] = 8;
1521 data[0] = buffer;
1522
1523 rc = SMB2_set_info_init(tcon, &rqst[1],
1524 COMPOUND_FID, COMPOUND_FID,
1525 current->tgid,
1526 FILE_END_OF_FILE_INFORMATION,
1527 SMB2_O_INFO_FILE, 0, data, size);
1528 }
Steve French31ba4332019-03-13 02:40:07 -05001529 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1530 memset(&qi_iov, 0, sizeof(qi_iov));
1531 rqst[1].rq_iov = qi_iov;
1532 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001533
Steve French31ba4332019-03-13 02:40:07 -05001534 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
1535 COMPOUND_FID, qi.file_info_class,
1536 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001537 qi.input_buffer_length,
1538 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001539 } else { /* unknown flags */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001540 cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001541 rc = -EINVAL;
1542 }
1543
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001544 if (rc)
1545 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001546 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001547 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001548
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001549 /* Close */
1550 memset(&close_iov, 0, sizeof(close_iov));
1551 rqst[2].rq_iov = close_iov;
1552 rqst[2].rq_nvec = 1;
1553
Steve French43f8a6a2019-12-02 21:46:54 -06001554 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001555 if (rc)
1556 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001557 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001558
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001559 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1560 resp_buftype, rsp_iov);
1561 if (rc)
1562 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001563
1564 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001565 if (qi.flags & PASSTHRU_FSCTL) {
1566 pqi = (struct smb_query_info __user *)arg;
1567 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1568 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1569 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001570 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001571 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1572 > rsp_iov[1].iov_len)
1573 goto e_fault;
1574
1575 if (copy_to_user(&pqi->input_buffer_length,
1576 &qi.input_buffer_length,
1577 sizeof(qi.input_buffer_length)))
1578 goto e_fault;
1579
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001580 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1581 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001582 qi.input_buffer_length))
1583 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001584 } else {
1585 pqi = (struct smb_query_info __user *)arg;
1586 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1587 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1588 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001589 if (copy_to_user(&pqi->input_buffer_length,
1590 &qi.input_buffer_length,
1591 sizeof(qi.input_buffer_length)))
1592 goto e_fault;
1593
1594 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1595 qi.input_buffer_length))
1596 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001597 }
1598
1599 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001600 kfree(buffer);
1601 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001602 if (qi.flags & PASSTHRU_FSCTL)
1603 SMB2_ioctl_free(&rqst[1]);
1604 else
1605 SMB2_query_info_free(&rqst[1]);
1606
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001607 SMB2_close_free(&rqst[2]);
1608 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1609 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1610 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001611 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001612
1613e_fault:
1614 rc = -EFAULT;
1615 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001616}
1617
/*
 * Server-side copy of @len bytes from @srcfile (at @src_off) to @trgtfile
 * (at @dest_off) using FSCTL_SRV_COPYCHUNK_WRITE, so the data never has to
 * travel through the client.
 *
 * Returns the total number of bytes copied on success, or a negative errno.
 * May update tcon->max_bytes_chunk if the server advertises smaller limits.
 */
static ssize_t
smb2_copychunk_range(const unsigned int xid,
			struct cifsFileInfo *srcfile,
			struct cifsFileInfo *trgtfile, u64 src_off,
			u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct copychunk_ioctl *pcchunk;
	struct copychunk_ioctl_rsp *retbuf = NULL;
	struct cifs_tcon *tcon;
	int chunks_copied = 0;
	bool chunk_sizes_updated = false;
	ssize_t bytes_written, total_bytes_written = 0;

	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);

	if (pcchunk == NULL)
		return -ENOMEM;

	cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
	/* Request a key from the server to identify the source of the copy */
	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
				srcfile->fid.persistent_fid,
				srcfile->fid.volatile_fid, pcchunk);

	/* Note: request_res_key sets res_key null only if rc !=0 */
	if (rc)
		goto cchunk_out;

	/* For now array only one chunk long, will make more flexible later */
	pcchunk->ChunkCount = cpu_to_le32(1);
	pcchunk->Reserved = 0;
	pcchunk->Reserved2 = 0;

	tcon = tlink_tcon(trgtfile->tlink);

	while (len > 0) {
		pcchunk->SourceOffset = cpu_to_le64(src_off);
		pcchunk->TargetOffset = cpu_to_le64(dest_off);
		/* Each request copies at most one chunk of the tcon's max */
		pcchunk->Length =
			cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));

		/* Request server copy to target from src identified by key */
		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
			true /* is_fsctl */, (char *)pcchunk,
			sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
			(char **)&retbuf, &ret_data_len);
		if (rc == 0) {
			/* Validate the response before trusting its counts */
			if (ret_data_len !=
					sizeof(struct copychunk_ioctl_rsp)) {
				cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (retbuf->TotalBytesWritten == 0) {
				cifs_dbg(FYI, "no bytes copied\n");
				rc = -EIO;
				goto cchunk_out;
			}
			/*
			 * Check if server claimed to write more than we asked
			 */
			if (le32_to_cpu(retbuf->TotalBytesWritten) >
			    le32_to_cpu(pcchunk->Length)) {
				cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
				cifs_tcon_dbg(VFS, "invalid num chunks written\n");
				rc = -EIO;
				goto cchunk_out;
			}
			chunks_copied++;

			/* Advance both file offsets by the amount copied */
			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
			src_off += bytes_written;
			dest_off += bytes_written;
			len -= bytes_written;
			total_bytes_written += bytes_written;

			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				bytes_written);
		} else if (rc == -EINVAL) {
			/*
			 * An -EINVAL carrying a full-sized response is the
			 * server reporting its (smaller) copychunk limits;
			 * shrink ours and retry once.  Anything else bail.
			 */
			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
				goto cchunk_out;

			cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				le32_to_cpu(retbuf->TotalBytesWritten));

			/*
			 * Check if this is the first request using these sizes,
			 * (ie check if copy succeed once with original sizes
			 * and check if the server gave us different sizes after
			 * we already updated max sizes on previous request).
			 * if not then why is the server returning an error now
			 */
			if ((chunks_copied != 0) || chunk_sizes_updated)
				goto cchunk_out;

			/* Check that server is not asking us to grow size */
			if (le32_to_cpu(retbuf->ChunkBytesWritten) <
					tcon->max_bytes_chunk)
				tcon->max_bytes_chunk =
					le32_to_cpu(retbuf->ChunkBytesWritten);
			else
				goto cchunk_out; /* server gave us bogus size */

			/* No need to change MaxChunks since already set to 1 */
			chunk_sizes_updated = true;
		} else
			goto cchunk_out;
	}

cchunk_out:
	kfree(pcchunk);
	kfree(retbuf);
	if (rc)
		return rc;
	else
		return total_bytes_written;
}
1746
1747static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001748smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1749 struct cifs_fid *fid)
1750{
1751 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1752}
1753
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001754static unsigned int
1755smb2_read_data_offset(char *buf)
1756{
1757 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001758
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001759 return rsp->DataOffset;
1760}
1761
1762static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001763smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001764{
1765 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001766
1767 if (in_remaining)
1768 return le32_to_cpu(rsp->DataRemaining);
1769
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001770 return le32_to_cpu(rsp->DataLength);
1771}
1772
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001773
1774static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001775smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001776 struct cifs_io_parms *parms, unsigned int *bytes_read,
1777 char **buf, int *buf_type)
1778{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001779 parms->persistent_fid = pfid->persistent_fid;
1780 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001781 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1782}
1783
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001784static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001785smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001786 struct cifs_io_parms *parms, unsigned int *written,
1787 struct kvec *iov, unsigned long nr_segs)
1788{
1789
Steve Frenchdb8b6312014-09-22 05:13:55 -05001790 parms->persistent_fid = pfid->persistent_fid;
1791 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001792 return SMB2_write(xid, parms, written, iov, nr_segs);
1793}
1794
/*
 * Set or clear the SPARSE_FILE attribute based on value passed in setsparse.
 * Returns true when the file's sparse state matches @setsparse on exit
 * (including the no-op cases), false when the FSCTL failed or the share is
 * known not to support it.  On success the cached cifsAttrs are updated.
 */
static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
			   struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
{
	struct cifsInodeInfo *cifsi;
	int rc;

	cifsi = CIFS_I(inode);

	/* if file already sparse don't bother setting sparse again */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
		return true; /* already sparse */

	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
		return true; /* already not sparse */

	/*
	 * Can't check for sparse support on share the usual way via the
	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
	 * since Samba server doesn't set the flag on the share, yet
	 * supports the set sparse FSCTL and returns sparse correctly
	 * in the file attributes. If we fail setting sparse though we
	 * mark that server does not support sparse files for this share
	 * to avoid repeatedly sending the unsupported fsctl to server
	 * if the file is repeatedly extended.
	 */
	if (tcon->broken_sparse_sup)
		return false;

	/* The single input byte is the on/off flag for FSCTL_SET_SPARSE */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
			true /* is_fctl */,
			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
	if (rc) {
		tcon->broken_sparse_sup = true;
		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
		return false;
	}

	/* keep cached attributes in sync with the server's new state */
	if (setsparse)
		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
	else
		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);

	return true;
}
1841
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001842static int
1843smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1844 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1845{
1846 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001847 struct inode *inode;
1848
1849 /*
1850 * If extending file more than one page make sparse. Many Linux fs
1851 * make files sparse by default when extending via ftruncate
1852 */
David Howells2b0143b2015-03-17 22:25:59 +00001853 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001854
1855 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001856 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001857
Steve Frenchd43cc792014-08-13 17:16:29 -05001858 /* whether set sparse succeeds or not, extend the file */
1859 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001860 }
1861
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001862 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001863 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001864}
1865
/*
 * Clone @len bytes of extents from @srcfile (at @src_off) into @trgtfile
 * (at @dest_off) via FSCTL_DUPLICATE_EXTENTS_TO_FILE (block-refcounting
 * servers such as ReFS).  Returns 0 on success or a negative errno.
 */
static int
smb2_duplicate_extents(const unsigned int xid,
			struct cifsFileInfo *srcfile,
			struct cifsFileInfo *trgtfile, u64 src_off,
			u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* server filesystems advertise duplicate extent support with this flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
		FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	/* source handle and range go in the FSCTL input buffer */
	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);

	/* extend the target first so the cloned range fits */
	rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
	if (rc)
		goto duplicate_extents_out;

	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			true /* is_fsctl */,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	/* no output payload is expected for this FSCTL */
	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	return rc;
}
Steve French02b16662015-06-27 21:18:36 -07001909
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001910static int
Steve French64a5cfa2013-10-14 15:31:32 -05001911smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1912 struct cifsFileInfo *cfile)
1913{
1914 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1915 cfile->fid.volatile_fid);
1916}
1917
1918static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001919smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1920 struct cifsFileInfo *cfile)
1921{
1922 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001923 unsigned int ret_data_len;
1924
1925 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1926 integr_info.Flags = 0;
1927 integr_info.Reserved = 0;
1928
1929 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1930 cfile->fid.volatile_fid,
1931 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001932 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001933 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001934 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001935 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001936 &ret_data_len);
1937
1938}
1939
Steve Frenche02789a2018-08-09 14:33:12 -05001940/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1941#define GMT_TOKEN_SIZE 50
1942
Steve French153322f2019-03-28 22:32:49 -05001943#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1944
/*
 * Input buffer contains (empty) struct smb_snapshot array with size filled in
 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
 */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, void __user *ioc_buf)
{
	char *retbuf = NULL;
	unsigned int ret_data_len = 0;
	int rc;
	u32 max_response_size;
	struct smb_snapshot_array snapshot_in;

	/*
	 * On the first query to enumerate the list of snapshots available
	 * for this volume the buffer begins with 0 (number of snapshots
	 * which can be returned is zero since at that point we do not know
	 * how big the buffer needs to be). On the second query,
	 * it (ret_data_len) is set to number of snapshots so we can
	 * know to set the maximum response size larger (see below).
	 */
	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
		return -EFAULT;

	/*
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes.
	 */
	if (ret_data_len == 0)
		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
	else
		max_response_size = CIFSMaxBufSize;

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			true /* is_fsctl */,
			NULL, 0 /* no input data */, max_response_size,
			(char **)&retbuf,
			&ret_data_len);
	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
			rc, ret_data_len);
	if (rc)
		return rc;

	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
		/* Fixup buffer */
		if (copy_from_user(&snapshot_in, ioc_buf,
		    sizeof(struct smb_snapshot_array))) {
			rc = -EFAULT;
			kfree(retbuf);
			return rc;
		}

		/*
		 * Check for min size, ie not large enough to fit even one GMT
		 * token (snapshot). On the first ioctl some users may pass in
		 * smaller size (or zero) to simply get the size of the array
		 * so the user space caller can allocate sufficient memory
		 * and retry the ioctl again with larger array size sufficient
		 * to hold all of the snapshot GMT tokens on the second try.
		 */
		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
			ret_data_len = sizeof(struct smb_snapshot_array);

		/*
		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
		 * the snapshot array (of 50 byte GMT tokens) each
		 * representing an available previous version of the data
		 */
		if (ret_data_len > (snapshot_in.snapshot_array_size +
				   sizeof(struct smb_snapshot_array)))
			ret_data_len = snapshot_in.snapshot_array_size +
				       sizeof(struct smb_snapshot_array);

		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
			rc = -EFAULT;
	}

	kfree(retbuf);
	return rc;
}
2031
2032static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002033smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2034 const char *path, struct cifs_sb_info *cifs_sb,
2035 struct cifs_fid *fid, __u16 search_flags,
2036 struct cifs_search_info *srch_inf)
2037{
2038 __le16 *utf16_path;
2039 int rc;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002040 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002041 struct cifs_open_parms oparms;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002042
2043 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2044 if (!utf16_path)
2045 return -ENOMEM;
2046
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002047 oparms.tcon = tcon;
2048 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2049 oparms.disposition = FILE_OPEN;
Steve French5e196972018-08-27 17:04:13 -05002050 if (backup_cred(cifs_sb))
2051 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2052 else
2053 oparms.create_options = 0;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002054 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002055 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002056
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002057 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002058 kfree(utf16_path);
2059 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07002060 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002061 return rc;
2062 }
2063
2064 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02002065 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002066
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002067 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
2068 fid->volatile_fid, 0, srch_inf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002069 if (rc) {
Pavel Shilovskydcd878382017-06-06 16:58:58 -07002070 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002071 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002072 }
2073 return rc;
2074}
2075
2076static int
2077smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2078 struct cifs_fid *fid, __u16 search_flags,
2079 struct cifs_search_info *srch_inf)
2080{
2081 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2082 fid->volatile_fid, 0, srch_inf);
2083}
2084
2085static int
2086smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2087 struct cifs_fid *fid)
2088{
2089 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2090}
2091
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002092/*
Christoph Probsta205d502019-05-08 21:36:25 +02002093 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2094 * the number of credits and return true. Otherwise - return false.
2095 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002096static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002097smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002098{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002099 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002100
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002101 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002102 return false;
2103
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002104 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002105 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002106 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002107 spin_unlock(&server->req_lock);
2108 wake_up(&server->request_q);
2109 }
2110
2111 return true;
2112}
2113
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002114static bool
2115smb2_is_session_expired(char *buf)
2116{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002117 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002118
Mark Symsd81243c2018-05-24 09:47:31 +01002119 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2120 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002121 return false;
2122
Steve Frenche68a9322018-07-30 14:23:58 -05002123 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2124 le16_to_cpu(shdr->Command),
2125 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002126 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002127
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002128 return true;
2129}
2130
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002131static int
2132smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2133 struct cifsInodeInfo *cinode)
2134{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002135 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2136 return SMB2_lease_break(0, tcon, cinode->lease_key,
2137 smb2_get_lease_state(cinode));
2138
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002139 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2140 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002141 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002142}
2143
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002144void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002145smb2_set_related(struct smb_rqst *rqst)
2146{
2147 struct smb2_sync_hdr *shdr;
2148
2149 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002150 if (shdr == NULL) {
2151 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2152 return;
2153 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002154 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2155}
2156
/* Zero bytes used to pad compound requests out to 8-byte alignment */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2158
/*
 * Set the NextCommand field of the request in @rqst so another request can
 * be chained after it in a compound, padding the request out to the 8-byte
 * alignment compounds require.  For encrypted connections the iovs (after
 * the header) are flattened into one buffer, since the encryption path
 * cannot handle a separate small padding iov.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			/* append each trailing iov onto the first buffer */
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		/* zero-fill the pad bytes at the end of the merged buffer */
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2215
/*
 * Open the object at @utf16_path, query info of the given @class/@type
 * (up to @output_len bytes) and close it again, all in one compound
 * (open -> query_info -> close) round trip.
 *
 * Passes the query info response back to the caller on success.
 * Caller need to free this with free_rsp_buf().
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	/* encrypted shares need the whole compound wrapped in a transform hdr */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* request 1: open the object named by utf16_path */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	/* cifs_sb may be NULL (e.g. queryfs) - then no backup intent */
	if (cifs_sb && backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/*
	 * request 2: query info on the handle opened by request 1
	 * (COMPOUND_FID tells the server to reuse the fid from the
	 * preceding related request)
	 */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* request 3: close the handle again */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		/*
		 * On failure the query info response is not handed to the
		 * caller, so free it here; iov[0] and iov[2] are freed in
		 * qic_exit below.
		 */
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			/* share was removed/replaced on server - reconnect */
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto qic_exit;
	}
	/* success: hand the query info response (iov[1]) to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2310
/*
 * statfs for SMB2/SMB3: query FS_FULL_SIZE_INFORMATION on the root of
 * the share via a compound open/query/close and convert the result
 * into @buf.
 */
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
	     struct kstatfs *buf)
{
	struct smb2_query_info_rsp *rsp;
	struct smb2_fs_full_size_info *info = NULL;
	__le16 utf16_path = 0; /* Null - open root of share */
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	int rc;


	rc = smb2_query_info_compound(xid, tcon, &utf16_path,
				      FILE_READ_ATTRIBUTES,
				      FS_FULL_SIZE_INFORMATION,
				      SMB2_O_INFO_FILESYSTEM,
				      sizeof(struct smb2_fs_full_size_info),
				      &rsp_iov, &buftype, NULL);
	if (rc)
		goto qfs_exit;

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	buf->f_type = SMB2_MAGIC_NUMBER;
	/* the fs info payload sits at OutputBufferOffset within the rsp */
	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	/* verify the offset/length really lie inside the received iov */
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, buf);

qfs_exit:
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
2347
Steve French2d304212018-06-24 23:28:12 -05002348static int
2349smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2350 struct kstatfs *buf)
2351{
2352 int rc;
2353 __le16 srch_path = 0; /* Null - open root of share */
2354 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2355 struct cifs_open_parms oparms;
2356 struct cifs_fid fid;
2357
2358 if (!tcon->posix_extensions)
2359 return smb2_queryfs(xid, tcon, buf);
2360
2361 oparms.tcon = tcon;
2362 oparms.desired_access = FILE_READ_ATTRIBUTES;
2363 oparms.disposition = FILE_OPEN;
2364 oparms.create_options = 0;
2365 oparms.fid = &fid;
2366 oparms.reconnect = false;
2367
2368 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2369 if (rc)
2370 return rc;
2371
2372 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2373 fid.volatile_fid, buf);
2374 buf->f_type = SMB2_MAGIC_NUMBER;
2375 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2376 return rc;
2377}
Steve French2d304212018-06-24 23:28:12 -05002378
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002379static bool
2380smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2381{
2382 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2383 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2384}
2385
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002386static int
2387smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2388 __u64 length, __u32 type, int lock, int unlock, bool wait)
2389{
2390 if (unlock && !lock)
2391 type = SMB2_LOCKFLAG_UNLOCK;
2392 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2393 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2394 current->tgid, length, offset, type, wait);
2395}
2396
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002397static void
2398smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2399{
2400 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2401}
2402
2403static void
2404smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2405{
2406 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2407}
2408
/* Generate a fresh random lease key for a brand new open. */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2414
/*
 * Resolve a DFS referral for @search_name on session @ses using
 * FSCTL_DFS_GET_REFERRALS.  On success *target_nodes points to an
 * allocated array of *num_of_nodes entries; the caller is responsible
 * for freeing it.
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		/* no IPC tcon: borrow the first tcon on the session, taking
		 * a reference so it cannot go away while we use it */
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		if (tcon)
			tcon->tc_count++;
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	/* request = fixed header + the UTF-16 path to resolve */
	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	do {
		/* -EAGAIN indicates a transient failure (presumably session
		 * reconnect) - retry the ioctl; TODO confirm */
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
	} while (rc == -EAGAIN);

	if (rc) {
		/* ENOENT/EOPNOTSUPP are expected failures - don't log them */
		if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
			cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002509
2510static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002511parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2512 u32 plen, char **target_path,
2513 struct cifs_sb_info *cifs_sb)
2514{
2515 unsigned int len;
2516
2517 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2518 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2519
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002520 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2521 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2522 le64_to_cpu(symlink_buf->InodeType));
2523 return -EOPNOTSUPP;
2524 }
2525
2526 *target_path = cifs_strndup_from_utf16(
2527 symlink_buf->PathBuffer,
2528 len, true, cifs_sb->local_nls);
2529 if (!(*target_path))
2530 return -ENOMEM;
2531
2532 convert_delimiter(*target_path, '/');
2533 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2534
2535 return 0;
2536}
2537
/*
 * Parse a Windows symbolic-link reparse buffer into a target path.
 * On success *target_path is set to an allocated string (caller frees).
 */
static int
parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
		      u32 plen, char **target_path,
		      struct cifs_sb_info *cifs_sb)
{
	unsigned int sub_len;
	unsigned int sub_offset;

	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */

	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
	/*
	 * 20 is presumably the fixed header size preceding PathBuffer in
	 * struct reparse_symlink_data_buffer (offsetof(PathBuffer)) —
	 * TODO confirm against the struct definition.
	 */
	if (sub_offset + 20 > plen ||
	    sub_offset + sub_len + 20 > plen) {
		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
		return -EIO;
	}

	/* use the substitute (not print) name as the real target */
	*target_path = cifs_strndup_from_utf16(
			symlink_buf->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path))
		return -ENOMEM;

	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	return 0;
}
2567
/*
 * Validate a reparse point buffer of total length @plen and dispatch to
 * the tag-specific parser (NFS-style or Windows symlink).  On success
 * *target_path is set to an allocated string (caller frees).
 */
static int
parse_reparse_point(struct reparse_data_buffer *buf,
		    u32 plen, char **target_path,
		    struct cifs_sb_info *cifs_sb)
{
	/* must at least hold the fixed reparse header */
	if (plen < sizeof(struct reparse_data_buffer)) {
		cifs_dbg(VFS, "reparse buffer is too small. Must be "
			 "at least 8 bytes but was %d\n", plen);
		return -EIO;
	}

	/* the declared payload must fit within the received buffer */
	if (plen < le16_to_cpu(buf->ReparseDataLength) +
	    sizeof(struct reparse_data_buffer)) {
		cifs_dbg(VFS, "srv returned invalid reparse buf "
			 "length: %d\n", plen);
		return -EIO;
	}

	/* See MS-FSCC 2.1.2 */
	switch (le32_to_cpu(buf->ReparseTag)) {
	case IO_REPARSE_TAG_NFS:
		return parse_reparse_posix(
			(struct reparse_posix_data *)buf,
			plen, target_path, cifs_sb);
	case IO_REPARSE_TAG_SYMLINK:
		return parse_reparse_symlink(
			(struct reparse_symlink_data_buffer *)buf,
			plen, target_path, cifs_sb);
	default:
		cifs_dbg(VFS, "srv returned unknown symlink buffer "
			 "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
		return -EOPNOTSUPP;
	}
}
2602
Pavel Shilovsky78932422016-07-24 10:37:38 +03002603#define SMB2_SYMLINK_STRUCT_SIZE \
2604 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2605
/*
 * Resolve the target of the symlink at @full_path.
 *
 * Sends a compound open/ioctl(FSCTL_GET_REPARSE_POINT)/close.  Two ways
 * to get a target back:
 *  - @is_reparse_point and the open succeeded: parse the reparse data
 *    returned by the ioctl (MS-FSCC 2.3.23);
 *  - the open failed with a symlink error response: extract the target
 *    (substitute name) from the error buffer.
 * On success *target_path is an allocated string the caller must free.
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;

	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	/* open the reparse point itself, not what it points at */
	if (is_reparse_point)
		oparms.create_options = OPEN_REPARSE_POINT;

	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* if the open failed, its error response may carry the target */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->sync_hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* reparse payload must lie within the received response */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				 plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	/* no error response to parse -> not a symlink we can resolve */
	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	err_buf = err_iov.iov_base;
	/* error data must be large enough for a symlink error response */
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	/* both name regions must fit inside the error response */
	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* the substitute name is the actual target path */
	*target_path = cifs_strndup_from_utf16(
				(char *)symlink->PathBuffer + sub_offset,
				sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

 querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2792
/*
 * Fetch the security descriptor for an already-open file identified by
 * @cifsfid.  Returns an allocated descriptor (caller frees) with its
 * length in *pacllen, or an ERR_PTR on failure.
 */
static struct cifs_ntsd *
get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
		const struct cifs_fid *cifsfid, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	unsigned int xid;
	int rc = -EOPNOTSUPP;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	xid = get_xid();
	cifs_dbg(FYI, "trying to get acl\n");

	rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
			    cifsfid->volatile_fid, (void **)&pntsd, pacllen);
	free_xid(xid);

	cifs_put_tlink(tlink);

	/*
	 * NOTE(review): on failure *pacllen may not have been written by
	 * SMB2_query_acl, so this debug print could show stale data —
	 * harmless, but worth confirming.
	 */
	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;

}
2820
2821static struct cifs_ntsd *
2822get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2823 const char *path, u32 *pacllen)
2824{
2825 struct cifs_ntsd *pntsd = NULL;
2826 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2827 unsigned int xid;
2828 int rc;
2829 struct cifs_tcon *tcon;
2830 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2831 struct cifs_fid fid;
2832 struct cifs_open_parms oparms;
2833 __le16 *utf16_path;
2834
2835 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2836 if (IS_ERR(tlink))
2837 return ERR_CAST(tlink);
2838
2839 tcon = tlink_tcon(tlink);
2840 xid = get_xid();
2841
2842 if (backup_cred(cifs_sb))
Colin Ian King709340a2017-07-05 13:47:34 +01002843 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002844 else
2845 oparms.create_options = 0;
2846
2847 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002848 if (!utf16_path) {
2849 rc = -ENOMEM;
2850 free_xid(xid);
2851 return ERR_PTR(rc);
2852 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002853
2854 oparms.tcon = tcon;
2855 oparms.desired_access = READ_CONTROL;
2856 oparms.disposition = FILE_OPEN;
2857 oparms.fid = &fid;
2858 oparms.reconnect = false;
2859
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002860 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002861 kfree(utf16_path);
2862 if (!rc) {
2863 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2864 fid.volatile_fid, (void **)&pntsd, pacllen);
2865 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2866 }
2867
2868 cifs_put_tlink(tlink);
2869 free_xid(xid);
2870
2871 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2872 if (rc)
2873 return ERR_PTR(rc);
2874 return pntsd;
2875}
2876
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002877static int
2878set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2879 struct inode *inode, const char *path, int aclflag)
2880{
2881 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2882 unsigned int xid;
2883 int rc, access_flags = 0;
2884 struct cifs_tcon *tcon;
2885 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2886 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2887 struct cifs_fid fid;
2888 struct cifs_open_parms oparms;
2889 __le16 *utf16_path;
2890
2891 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2892 if (IS_ERR(tlink))
2893 return PTR_ERR(tlink);
2894
2895 tcon = tlink_tcon(tlink);
2896 xid = get_xid();
2897
2898 if (backup_cred(cifs_sb))
2899 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2900 else
2901 oparms.create_options = 0;
2902
2903 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2904 access_flags = WRITE_OWNER;
2905 else
2906 access_flags = WRITE_DAC;
2907
2908 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002909 if (!utf16_path) {
2910 rc = -ENOMEM;
2911 free_xid(xid);
2912 return rc;
2913 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002914
2915 oparms.tcon = tcon;
2916 oparms.desired_access = access_flags;
2917 oparms.disposition = FILE_OPEN;
2918 oparms.path = path;
2919 oparms.fid = &fid;
2920 oparms.reconnect = false;
2921
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002922 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002923 kfree(utf16_path);
2924 if (!rc) {
2925 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2926 fid.volatile_fid, pnntsd, acllen, aclflag);
2927 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2928 }
2929
2930 cifs_put_tlink(tlink);
2931 free_xid(xid);
2932 return rc;
2933}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05002934
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002935/* Retrieve an ACL from the server */
2936static struct cifs_ntsd *
2937get_smb2_acl(struct cifs_sb_info *cifs_sb,
2938 struct inode *inode, const char *path,
2939 u32 *pacllen)
2940{
2941 struct cifs_ntsd *pntsd = NULL;
2942 struct cifsFileInfo *open_file = NULL;
2943
2944 if (inode)
2945 open_file = find_readable_file(CIFS_I(inode), true);
2946 if (!open_file)
2947 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2948
2949 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2950 cifsFileInfo_put(open_file);
2951 return pntsd;
2952}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002953
/*
 * fallocate(FALLOC_FL_ZERO_RANGE) implementation: zero @len bytes at
 * @offset via FSCTL_SET_ZERO_DATA and, unless @keep_size, extend EOF
 * to cover the zeroed range.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);


	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* zero [FileOffset, BeyondFinalZero) on the server */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
3017
/*
 * fallocate(FALLOC_FL_PUNCH_HOLE) implementation: deallocate @len bytes
 * at @offset via FSCTL_SET_ZERO_DATA.  The file must be (made) sparse
 * first, otherwise the server would only zero rather than deallocate.
 */
static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len)
{
	struct inode *inode;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__u8 set_sparse = 1;

	xid = get_xid();

	inode = d_inode(cfile->dentry);

	/* Need to make file sparse, if not already, before freeing range. */
	/* Consider adding equivalent for compressed since it could also work */
	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* deallocate [FileOffset, BeyondFinalZero) on the server */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
			true /* is_fctl */, (char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			CIFSMaxBufSize, NULL, NULL);
	free_xid(xid);
	return rc;
}
3053
/*
 * Emulate plain fallocate (no punch-hole/zero-range flags) over SMB3.
 * There is no direct preallocate op in the protocol, so we either clear
 * the sparse attribute (forcing the server to allocate the file) or
 * extend EOF via SMB2_set_eof; combinations we cannot emulate safely
 * return -EOPNOTSUPP.  @keep_size corresponds to FALLOC_FL_KEEP_SIZE.
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			    loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and not extending
	 * then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		if (keep_size == true)
			rc = 0;
		/* check if extending file */
		else if (i_size_read(inode) >= off + len)
			/* not extending file and already not sparse */
			rc = 0;
		/* BB: in future add else clause to extend file */
		else
			rc = -EOPNOTSUPP;
		if (rc)
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
		else
			trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len);
		free_xid(xid);
		return rc;
	}

	/* sparse file: we can only "allocate" by making the whole file
	 * non-sparse, which is acceptable only when the requested range
	 * covers (nearly) all of the file */
	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

		smb2_set_sparse(xid, tcon, cfile, inode, false);
		rc = 0;
	} else {
		/* extending: clear sparse flag and grow EOF if needed */
		smb2_set_sparse(xid, tcon, cfile, inode, false);
		rc = 0;
		if (i_size_read(inode) < off + len) {
			eof = cpu_to_le64(off + len);
			rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
					  cfile->fid.volatile_fid, cfile->pid,
					  &eof);
		}
	}

	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
				      tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
				       tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3145
/*
 * llseek for SEEK_HOLE/SEEK_DATA, implemented with
 * FSCTL_QUERY_ALLOCATED_RANGES; other whence values are delegated to
 * generic_file_llseek().  Returns the new position via vfs_setpos() on
 * success, -ENXIO when the offset is outside the file or (for
 * SEEK_DATA) no data follows it.
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, false);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* non-sparse files are fully allocated: only hole is at EOF */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	/* query allocated ranges from offset to EOF; we only need the
	 * first returned range, so a one-entry output buffer suffices
	 * (-E2BIG just means there were more ranges) */
	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG)
		rc = 0;
	if (rc)
		goto lseek_exit;

	/* no allocated ranges past offset: it already sits in the hole */
	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		/* first allocated range at or after offset is the data */
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: if offset precedes the first allocated range it is
	 * already in a hole; otherwise the hole starts after that range */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
3229
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003230static int smb3_fiemap(struct cifs_tcon *tcon,
3231 struct cifsFileInfo *cfile,
3232 struct fiemap_extent_info *fei, u64 start, u64 len)
3233{
3234 unsigned int xid;
3235 struct file_allocated_range_buffer in_data, *out_data;
3236 u32 out_data_len;
3237 int i, num, rc, flags, last_blob;
3238 u64 next;
3239
3240 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
3241 return -EBADR;
3242
3243 xid = get_xid();
3244 again:
3245 in_data.file_offset = cpu_to_le64(start);
3246 in_data.length = cpu_to_le64(len);
3247
3248 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3249 cfile->fid.volatile_fid,
3250 FSCTL_QUERY_ALLOCATED_RANGES, true,
3251 (char *)&in_data, sizeof(in_data),
3252 1024 * sizeof(struct file_allocated_range_buffer),
3253 (char **)&out_data, &out_data_len);
3254 if (rc == -E2BIG) {
3255 last_blob = 0;
3256 rc = 0;
3257 } else
3258 last_blob = 1;
3259 if (rc)
3260 goto out;
3261
3262 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3263 rc = -EINVAL;
3264 goto out;
3265 }
3266 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3267 rc = -EINVAL;
3268 goto out;
3269 }
3270
3271 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3272 for (i = 0; i < num; i++) {
3273 flags = 0;
3274 if (i == num - 1 && last_blob)
3275 flags |= FIEMAP_EXTENT_LAST;
3276
3277 rc = fiemap_fill_next_extent(fei,
3278 le64_to_cpu(out_data[i].file_offset),
3279 le64_to_cpu(out_data[i].file_offset),
3280 le64_to_cpu(out_data[i].length),
3281 flags);
3282 if (rc < 0)
3283 goto out;
3284 if (rc == 1) {
3285 rc = 0;
3286 goto out;
3287 }
3288 }
3289
3290 if (!last_blob) {
3291 next = le64_to_cpu(out_data[num - 1].file_offset) +
3292 le64_to_cpu(out_data[num - 1].length);
3293 len = len - (next - start);
3294 start = next;
3295 goto again;
3296 }
3297
3298 out:
3299 free_xid(xid);
3300 kfree(out_data);
3301 return rc;
3302}
Steve French9ccf3212014-10-18 17:01:15 -05003303
Steve French31742c52014-08-17 08:38:47 -05003304static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3305 loff_t off, loff_t len)
3306{
3307 /* KEEP_SIZE already checked for by do_fallocate */
3308 if (mode & FALLOC_FL_PUNCH_HOLE)
3309 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003310 else if (mode & FALLOC_FL_ZERO_RANGE) {
3311 if (mode & FALLOC_FL_KEEP_SIZE)
3312 return smb3_zero_range(file, tcon, off, len, true);
3313 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003314 } else if (mode == FALLOC_FL_KEEP_SIZE)
3315 return smb3_simple_falloc(file, tcon, off, len, true);
3316 else if (mode == 0)
3317 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003318
3319 return -EOPNOTSUPP;
3320}
3321
/*
 * Apply an oplock downgrade for the SMB2.0 dialect.  There is no lease
 * epoch at this level (see smb2_parse_lease_buf, which sets epoch to 0),
 * so we simply install the new oplock state; the epoch and purge_cache
 * arguments exist only to match the common ops signature.
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
3329
3330static void
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07003331smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3332 unsigned int epoch, bool *purge_cache);
3333
/*
 * Apply a lease downgrade for SMB3, deciding whether cached pages must
 * be purged.  The new state is only installed when the server's epoch is
 * newer than ours.  The cache is purged when read caching was lost, or
 * when the state is unchanged but more than one epoch was skipped
 * (meaning we missed an intermediate state change).
 */
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_state = cinode->oplock;
	unsigned int old_epoch = cinode->epoch;
	unsigned int new_state;

	if (epoch > old_epoch) {
		/* newer epoch: accept the server's new lease state */
		smb21_set_oplock_level(cinode, oplock, 0, NULL);
		cinode->epoch = epoch;
	}

	new_state = cinode->oplock;
	*purge_cache = false;

	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
	    (new_state & CIFS_CACHE_READ_FLG) == 0)
		*purge_cache = true;
	else if (old_state == new_state && (epoch - old_epoch > 1))
		*purge_cache = true;
}
3357
3358static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003359smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3360 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003361{
3362 oplock &= 0xFF;
3363 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3364 return;
3365 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003366 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003367 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3368 &cinode->vfs_inode);
3369 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003370 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003371 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3372 &cinode->vfs_inode);
3373 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3374 cinode->oplock = CIFS_CACHE_READ_FLG;
3375 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3376 &cinode->vfs_inode);
3377 } else
3378 cinode->oplock = 0;
3379}
3380
3381static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003382smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3383 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003384{
3385 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003386 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003387
3388 oplock &= 0xFF;
3389 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3390 return;
3391
Pavel Shilovskya016e272019-09-26 12:31:20 -07003392 /* Check if the server granted an oplock rather than a lease */
3393 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3394 return smb2_set_oplock_level(cinode, oplock, epoch,
3395 purge_cache);
3396
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003397 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003398 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003399 strcat(message, "R");
3400 }
3401 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003402 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003403 strcat(message, "H");
3404 }
3405 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003406 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003407 strcat(message, "W");
3408 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003409 if (!new_oplock)
3410 strncpy(message, "None", sizeof(message));
3411
3412 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003413 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3414 &cinode->vfs_inode);
3415}
3416
/*
 * SMB3 variant of set_oplock_level: installs the new lease state via the
 * SMB2.1 handler, then uses the (old state, new state, epoch delta)
 * combination to decide whether cached data must be purged.  The general
 * rule visible below: if the epoch advanced more than the state change
 * alone would explain, we missed an intermediate state and must purge.
 * purge_cache may be NULL when the caller does not care.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	if (purge_cache) {
		*purge_cache = false;
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
3451
/* SMB2.0: level II is the only read-only (shared) oplock level */
static bool
smb2_is_read_op(__u32 oplock)
{
	return oplock == SMB2_OPLOCK_LEVEL_II;
}
3457
/* SMB2.1+: a lease is read-only if it grants read but not write caching */
static bool
smb21_is_read_op(__u32 oplock)
{
	return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
	       !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
}
3464
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003465static __le32
3466map_oplock_to_lease(u8 oplock)
3467{
3468 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3469 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3470 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3471 return SMB2_LEASE_READ_CACHING;
3472 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3473 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3474 SMB2_LEASE_WRITE_CACHING;
3475 return 0;
3476}
3477
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003478static char *
3479smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3480{
3481 struct create_lease *buf;
3482
3483 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3484 if (!buf)
3485 return NULL;
3486
Stefano Brivio729c0c92018-07-05 15:10:02 +02003487 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003488 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003489
3490 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3491 (struct create_lease, lcontext));
3492 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3493 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3494 (struct create_lease, Name));
3495 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003496 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003497 buf->Name[0] = 'R';
3498 buf->Name[1] = 'q';
3499 buf->Name[2] = 'L';
3500 buf->Name[3] = 's';
3501 return (char *)buf;
3502}
3503
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003504static char *
3505smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3506{
3507 struct create_lease_v2 *buf;
3508
3509 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3510 if (!buf)
3511 return NULL;
3512
Stefano Brivio729c0c92018-07-05 15:10:02 +02003513 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003514 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3515
3516 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3517 (struct create_lease_v2, lcontext));
3518 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3519 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3520 (struct create_lease_v2, Name));
3521 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003522 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003523 buf->Name[0] = 'R';
3524 buf->Name[1] = 'q';
3525 buf->Name[2] = 'L';
3526 buf->Name[3] = 's';
3527 return (char *)buf;
3528}
3529
/*
 * Parse an SMB2.0 lease context from a create response and return the
 * granted lease state, or SMB2_OPLOCK_LEVEL_NOCHANGE while a lease
 * break is still in progress.  This dialect has no epoch, so *epoch is
 * always 0; lease_key is ignored here (see smb3_parse_lease_buf).
 */
static __u8
smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
{
	struct create_lease *lc = (struct create_lease *)buf;

	*epoch = 0; /* not used */
	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
		return SMB2_OPLOCK_LEVEL_NOCHANGE;
	return le32_to_cpu(lc->lcontext.LeaseState);
}
3540
/*
 * Parse an SMB3 (v2) lease context from a create response.  Returns the
 * granted lease state (or NOCHANGE during a break), reports the lease
 * epoch through *epoch, and copies the server-echoed lease key into
 * lease_key if the caller asked for it.
 */
static __u8
smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
{
	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;

	*epoch = le16_to_cpu(lc->lcontext.Epoch);
	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
		return SMB2_OPLOCK_LEVEL_NOCHANGE;
	if (lease_key)
		memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
	return le32_to_cpu(lc->lcontext.LeaseState);
}
3553
/*
 * Write size to use when retrying writepages: the mount's configured
 * wsize, capped at SMB2_MAX_BUFFER_SIZE.
 */
static unsigned int
smb2_wp_retry_size(struct inode *inode)
{
	return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
		     SMB2_MAX_BUFFER_SIZE);
}
3560
/*
 * A directory handle only needs an on-the-wire close if it is still
 * valid (i.e. was not already invalidated, e.g. by reconnect).
 */
static bool
smb2_dir_needs_close(struct cifsFileInfo *cfile)
{
	return !cfile->invalidHandle;
}
3566
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003567static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003568fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05003569 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003570{
3571 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003572 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003573
3574 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3575 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3576 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3577 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French2b2f7542019-06-07 15:16:10 -05003578 if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3579 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3580 else
3581 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003582 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003583}
3584
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	struct page *page;

	/*
	 * VMAP_STACK (at least) puts stack into the vmalloc address space,
	 * and virt_to_page() must not be used on vmalloc addresses - pick
	 * the right page-lookup helper for where @buf actually lives.
	 * (Was previously stored in a void *, silently losing the
	 * struct page * type check on the sg_set_page() call.)
	 */
	if (is_vmalloc_addr(buf))
		page = vmalloc_to_page(buf);
	else
		page = virt_to_page(buf);
	sg_set_page(sg, page, buflen, offset_in_page(buf));
}
3601
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0] is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds a scatterlist covering every iov and page of every rqst, plus
 * one trailing entry for the signature buffer @sign (hence the extra
 * "+1" in sg_len).  Returns NULL on allocation failure; the caller
 * frees the returned array with kfree().
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and per page, plus one for the signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* final entry: the signature (auth tag) buffer */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
3649
/*
 * Look up the SMB3 encryption (@enc != 0) or decryption key for the
 * session identified by @ses_id and copy it into @key
 * (SMB3_SIGN_KEY_SIZE bytes).  Returns 0 on success, 1 if no matching
 * session was found.
 *
 * NOTE(review): the @server parameter is immediately reused as the
 * iterator over the global cifs_tcp_ses_list, so the search spans every
 * connection, not just the one passed in - presumably deliberate (for
 * sessions reachable via multiple channels); confirm against callers.
 * Also note the failure return is 1 rather than a -errno.
 */
static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{
	struct cifs_ses *ses;
	u8 *ses_enc_key;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			if (ses->Suid == ses_id) {
				ses_enc_key = enc ? ses->smb3encryptionkey :
					ses->smb3decryptionkey;
				memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
				spin_unlock(&cifs_tcp_ses_lock);
				return 0;
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	return 1;
}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003672/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003673 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3674 * iov[0] - transform header (associate data),
3675 * iov[1-N] - SMB2 header and pages - data to encrypt.
3676 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003677 * untouched.
3678 */
3679static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003680crypt_message(struct TCP_Server_Info *server, int num_rqst,
3681 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003682{
3683 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003684 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003685 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003686 int rc = 0;
3687 struct scatterlist *sg;
3688 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003689 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003690 struct aead_request *req;
3691 char *iv;
3692 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003693 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003694 struct crypto_aead *tfm;
3695 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3696
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003697 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3698 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003699 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003700 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003701 return 0;
3702 }
3703
3704 rc = smb3_crypto_aead_allocate(server);
3705 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003706 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003707 return rc;
3708 }
3709
3710 tfm = enc ? server->secmech.ccmaesencrypt :
3711 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003712 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003713 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003714 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003715 return rc;
3716 }
3717
3718 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3719 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003720 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003721 return rc;
3722 }
3723
3724 req = aead_request_alloc(tfm, GFP_KERNEL);
3725 if (!req) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003726 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003727 return -ENOMEM;
3728 }
3729
3730 if (!enc) {
3731 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3732 crypt_len += SMB2_SIGNATURE_SIZE;
3733 }
3734
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003735 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003736 if (!sg) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003737 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003738 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003739 goto free_req;
3740 }
3741
3742 iv_len = crypto_aead_ivsize(tfm);
3743 iv = kzalloc(iv_len, GFP_KERNEL);
3744 if (!iv) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003745 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003746 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003747 goto free_sg;
3748 }
Steve French2b2f7542019-06-07 15:16:10 -05003749
3750 if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3751 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3752 else {
3753 iv[0] = 3;
3754 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
3755 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003756
3757 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3758 aead_request_set_ad(req, assoc_data_len);
3759
3760 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003761 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003762
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003763 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3764 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003765
3766 if (!rc && enc)
3767 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3768
3769 kfree(iv);
3770free_sg:
3771 kfree(sg);
3772free_req:
3773 kfree(req);
3774 return rc;
3775}
3776
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003777void
3778smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003779{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003780 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003781
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003782 for (i = 0; i < num_rqst; i++) {
3783 if (rqst[i].rq_pages) {
3784 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3785 put_page(rqst[i].rq_pages[j]);
3786 kfree(rqst[i].rq_pages);
3787 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003788 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003789}
3790
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003791/*
3792 * This function will initialize new_rq and encrypt the content.
3793 * The first entry, new_rq[0], only contains a single iov which contains
3794 * a smb2_transform_hdr and is pre-allocated by the caller.
3795 * This function then populates new_rq[1+] with the content from olq_rq[0+].
3796 *
3797 * The end result is an array of smb_rqst structures where the first structure
3798 * only contains a single iov for the transform header which we then can pass
3799 * to crypt_message().
3800 *
3801 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3802 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3803 */
3804static int
3805smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3806 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003807{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003808 struct page **pages;
3809 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3810 unsigned int npages;
3811 unsigned int orig_len = 0;
3812 int i, j;
3813 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003814
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003815 for (i = 1; i < num_rqst; i++) {
3816 npages = old_rq[i - 1].rq_npages;
3817 pages = kmalloc_array(npages, sizeof(struct page *),
3818 GFP_KERNEL);
3819 if (!pages)
3820 goto err_free;
3821
3822 new_rq[i].rq_pages = pages;
3823 new_rq[i].rq_npages = npages;
3824 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3825 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3826 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3827 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3828 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3829
3830 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3831
3832 for (j = 0; j < npages; j++) {
3833 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3834 if (!pages[j])
3835 goto err_free;
3836 }
3837
3838 /* copy pages form the old */
3839 for (j = 0; j < npages; j++) {
3840 char *dst, *src;
3841 unsigned int offset, len;
3842
3843 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3844
3845 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3846 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3847
3848 memcpy(dst, src, len);
3849 kunmap(new_rq[i].rq_pages[j]);
3850 kunmap(old_rq[i - 1].rq_pages[j]);
3851 }
3852 }
3853
3854 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05003855 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003856
3857 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02003858 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003859 if (rc)
3860 goto err_free;
3861
3862 return rc;
3863
3864err_free:
3865 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3866 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003867}
3868
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003869static int
3870smb3_is_transform_hdr(void *buf)
3871{
3872 struct smb2_transform_hdr *trhdr = buf;
3873
3874 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3875}
3876
3877static int
3878decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3879 unsigned int buf_data_size, struct page **pages,
3880 unsigned int npages, unsigned int page_data_size)
3881{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003882 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003883 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003884 int rc;
3885
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003886 iov[0].iov_base = buf;
3887 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3888 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3889 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003890
3891 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003892 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003893 rqst.rq_pages = pages;
3894 rqst.rq_npages = npages;
3895 rqst.rq_pagesz = PAGE_SIZE;
3896 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3897
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003898 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02003899 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003900
3901 if (rc)
3902 return rc;
3903
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003904 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003905
3906 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003907
3908 return rc;
3909}
3910
3911static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003912read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3913 unsigned int npages, unsigned int len)
3914{
3915 int i;
3916 int length;
3917
3918 for (i = 0; i < npages; i++) {
3919 struct page *page = pages[i];
3920 size_t n;
3921
3922 n = len;
3923 if (len >= PAGE_SIZE) {
3924 /* enough data to fill the page */
3925 n = PAGE_SIZE;
3926 len -= n;
3927 } else {
3928 zero_user(page, len, PAGE_SIZE - len);
3929 len = 0;
3930 }
Long Li1dbe3462018-05-30 12:47:55 -07003931 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08003932 if (length < 0)
3933 return length;
3934 server->total_read += length;
3935 }
3936
3937 return 0;
3938}
3939
/*
 * Build a bio_vec array describing @data_size bytes spread across @pages,
 * with the first page's data starting at @cur_off.  On success *page_vec
 * owns the kcalloc'd array (caller frees); returns -ENOMEM or -EIO if the
 * pages cannot account for exactly @data_size bytes.
 *
 * NOTE(review): bvec[0] gets bv_offset = cur_off but bv_len is still
 * min(PAGE_SIZE, data_size); this assumes cur_off + data in the first page
 * never crosses the page boundary (callers pass cur_off < PAGE_SIZE from a
 * small header pad) -- confirm against handle_read_data()'s pad_len checks.
 */
static int
init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
	       unsigned int cur_off, struct bio_vec **page_vec)
{
	struct bio_vec *bvec;
	int i;

	bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		bvec[i].bv_page = pages[i];
		/* only the first page carries the leading pad offset */
		bvec[i].bv_offset = (i == 0) ? cur_off : 0;
		bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
		data_size -= bvec[i].bv_len;
	}

	/* all of data_size must have been consumed by the pages */
	if (data_size != 0) {
		cifs_dbg(VFS, "%s: something went wrong\n", __func__);
		kfree(bvec);
		return -EIO;
	}

	*page_vec = bvec;
	return 0;
}
3967
/*
 * Process a (possibly large/decrypted) SMB2 READ response for the mid's
 * cifs_readdata.  The response header lives in @buf (@buf_len bytes); for
 * large reads the payload lives in @pages (@page_data_size bytes).  Copies
 * the payload into the caller's pages via rdata->copy_into_pages() and
 * dequeues the mid.  Returns 0 with the error recorded in rdata->result
 * for per-request failures, a negative value for transport-level problems.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	/* this path only understands READ responses */
	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server);
		wake_up(&server->response_q);
		return -1;
	}

	/* interim STATUS_PENDING response: keep waiting for the real one */
	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* bytes between the fixed response header and the payload */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* hand the payload iterator to the readdata consumer */
	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	dequeue_mid(mid, false);
	return length;
}
4101
/*
 * Work item used to offload decryption of a large encrypted read to the
 * decrypt workqueue (see receive_encrypted_read()/smb2_decrypt_offload()).
 */
struct smb2_decrypt_work {
	struct work_struct decrypt;	/* queued on decrypt_wq */
	struct TCP_Server_Info *server;	/* connection the PDU arrived on */
	struct page **ppages;		/* pages holding the encrypted payload */
	char *buf;			/* buffer with transform + response hdr */
	unsigned int npages;		/* number of entries in ppages */
	unsigned int len;		/* payload length spread over ppages */
};
4110
4111
/*
 * Workqueue handler: decrypt a large read off the demultiplex thread,
 * then locate the mid and complete it.  Owns (and frees) everything the
 * smb2_decrypt_work carries: the pages, the buffer, and the work item.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	/* record last server response time now that the PDU proved valid */
	dw->server->lstrp = jiffies;
	mid = smb2_find_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len);
		/* complete the request, then drop the find_mid reference */
		mid->callback(mid);
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4147
4148
/*
 * Receive an encrypted large read response: read the remainder of the
 * headers into @server->smallbuf, pull the payload into freshly allocated
 * pages, then either queue decryption on the decrypt workqueue (large
 * PDUs, when min_offload is configured) or decrypt inline and dispatch
 * via handle_read_data().
 *
 * On the offload path *num_mids is set to 0 and -1 is returned: the
 * worker takes over ownership of the buffer and pages, and the caller's
 * smallbuf is replaced so the demultiplex thread can keep receiving.
 */
static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
		       int *num_mids)
{
	char *buf = server->smallbuf;
	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
	unsigned int npages;
	struct page **pages;
	unsigned int len;
	unsigned int buflen = server->pdu_size;
	int rc;
	int i = 0;
	struct smb2_decrypt_work *dw;

	*num_mids = 1;
	/* finish reading the transform + read response headers */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;

	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
	if (rc < 0)
		return rc;
	server->total_read += rc;

	/* payload size = plaintext size minus the read response header */
	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
		server->vals->read_rsp_size;
	npages = DIV_ROUND_UP(len, PAGE_SIZE);

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto discard_data;
	}

	/* i tracks how many pages were allocated, for the cleanup loop */
	for (; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			rc = -ENOMEM;
			goto discard_data;
		}
	}

	/* read read data into pages */
	rc = read_data_into_pages(server, pages, npages, len);
	if (rc)
		goto free_pages;

	rc = cifs_discard_remaining_data(server);
	if (rc)
		goto free_pages;

	/*
	 * For large reads, offload to different thread for better performance,
	 * use more cores decrypting which can be expensive
	 */

	if ((server->min_offload) && (server->in_flight > 1) &&
	    (server->pdu_size >= server->min_offload)) {
		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
		if (dw == NULL)
			goto non_offloaded_decrypt;

		/* worker takes the current buffer; give the server a new one */
		dw->buf = server->smallbuf;
		server->smallbuf = (char *)cifs_small_buf_get();

		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);

		dw->npages = npages;
		dw->server = server;
		dw->ppages = pages;
		dw->len = len;
		queue_work(decrypt_wq, &dw->decrypt);
		*num_mids = 0; /* worker thread takes care of finding mid */
		return -1;
	}

non_offloaded_decrypt:
	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
			      pages, npages, len);
	if (rc)
		goto free_pages;

	*mid = smb2_find_mid(server, buf);
	if (*mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		(*mid)->decrypted = true;
		rc = handle_read_data(server, *mid, buf,
				      server->vals->read_rsp_size,
				      pages, npages, len);
	}

free_pages:
	/* release only the pages that were actually allocated (indices < i) */
	for (i = i - 1; i >= 0; i--)
		put_page(pages[i]);
	kfree(pages);
	return rc;
discard_data:
	cifs_discard_remaining_data(server);
	goto free_pages;
}
4250
4251static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004252receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004253 struct mid_q_entry **mids, char **bufs,
4254 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004255{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004256 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004257 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004258 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004259 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004260 unsigned int buf_size;
4261 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004262 int next_is_large;
4263 char *next_buffer = NULL;
4264
4265 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004266
4267 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004268 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004269 server->large_buf = true;
4270 memcpy(server->bigbuf, buf, server->total_read);
4271 buf = server->bigbuf;
4272 }
4273
4274 /* now read the rest */
4275 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004276 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004277 if (length < 0)
4278 return length;
4279 server->total_read += length;
4280
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004281 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004282 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
4283 if (length)
4284 return length;
4285
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004286 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004287one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004288 shdr = (struct smb2_sync_hdr *)buf;
4289 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004290 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004291 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004292 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004293 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004294 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004295 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004296 pdu_length - le32_to_cpu(shdr->NextCommand));
4297 }
4298
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004299 mid_entry = smb2_find_mid(server, buf);
4300 if (mid_entry == NULL)
4301 cifs_dbg(FYI, "mid not found\n");
4302 else {
4303 cifs_dbg(FYI, "mid found\n");
4304 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004305 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004306 }
4307
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004308 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004309 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004310 return -1;
4311 }
4312 bufs[*num_mids] = buf;
4313 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004314
4315 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004316 ret = mid_entry->handle(server, mid_entry);
4317 else
4318 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004319
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004320 if (ret == 0 && shdr->NextCommand) {
4321 pdu_length -= le32_to_cpu(shdr->NextCommand);
4322 server->large_buf = next_is_large;
4323 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004324 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004325 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004326 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004327 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004328 } else if (ret != 0) {
4329 /*
4330 * ret != 0 here means that we didn't get to handle_mid() thus
4331 * server->smallbuf and server->bigbuf are still valid. We need
4332 * to free next_buffer because it is not going to be used
4333 * anywhere.
4334 */
4335 if (next_is_large)
4336 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4337 else
4338 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004339 }
4340
4341 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004342}
4343
4344static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004345smb3_receive_transform(struct TCP_Server_Info *server,
4346 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004347{
4348 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004349 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004350 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4351 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4352
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004353 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004354 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004355 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004356 pdu_length);
4357 cifs_reconnect(server);
4358 wake_up(&server->response_q);
4359 return -ECONNABORTED;
4360 }
4361
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004362 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004363 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004364 cifs_reconnect(server);
4365 wake_up(&server->response_q);
4366 return -ECONNABORTED;
4367 }
4368
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004369 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004370 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004371 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004372 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004373
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004374 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004375}
4376
4377int
4378smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4379{
4380 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4381
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004382 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004383 NULL, 0, 0);
4384}
4385
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004386static int
4387smb2_next_header(char *buf)
4388{
4389 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4390 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4391
4392 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4393 return sizeof(struct smb2_transform_hdr) +
4394 le32_to_cpu(t_hdr->OriginalMessageSize);
4395
4396 return le32_to_cpu(hdr->NextCommand);
4397}
4398
/*
 * Create a special file (character or block device node) over SMB2/3 using
 * the SFU ("Services for Unix") emulation scheme: an ordinary file whose
 * first bytes are an "IntxCHR"/"IntxBLK" tag followed by the major/minor
 * numbers.  Only available when mounted with the 'sfu' option; sockets and
 * fifos are not supported.  Returns 0 on success or a negative errno
 * (-EPERM when SFU emulation does not apply).
 */
static int
smb2_make_node(unsigned int xid, struct inode *inode,
	       struct dentry *dentry, struct cifs_tcon *tcon,
	       char *full_path, umode_t mode, dev_t dev)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	int rc = -EPERM;
	int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
	FILE_ALL_INFO *buf = NULL;
	struct cifs_io_parms io_parms;
	__u32 oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	unsigned int bytes_written;
	struct win_dev *pdev;
	struct kvec iov[2];

	/*
	 * Check if mounted with mount parm 'sfu' mount parm.
	 * SFU emulation should work with all servers, but only
	 * supports block and char device (no socket & fifo),
	 * and was used by default in earlier versions of Windows
	 */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
		goto out;

	/*
	 * TODO: Add ability to create instead via reparse point. Windows (e.g.
	 * their current NFS server) uses this approach to expose special files
	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
	 */

	if (!S_ISCHR(mode) && !S_ISBLK(mode))
		goto out;

	cifs_dbg(FYI, "sfu compat create special file\n");

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* create the backing file that will carry the device metadata */
	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = create_options;
	oparms.disposition = FILE_CREATE;
	oparms.path = full_path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;
	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
	if (rc)
		goto out;

	/*
	 * BB Do not bother to decode buf since no local inode yet to put
	 * timestamps in, but we can reuse it safely.
	 */

	/* write the SFU tag + major/minor as the file's contents */
	pdev = (struct win_dev *)buf;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = sizeof(struct win_dev);
	iov[1].iov_base = buf;
	iov[1].iov_len = sizeof(struct win_dev);
	if (S_ISCHR(mode)) {
		memcpy(pdev->type, "IntxCHR", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	} else if (S_ISBLK(mode)) {
		memcpy(pdev->type, "IntxBLK", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	}
	tcon->ses->server->ops->close(xid, tcon, &fid);
	/* force a fresh lookup so the new node is revalidated from the server */
	d_drop(dentry);

	/* FIXME: add code here to set EAs */
out:
	kfree(buf);
	return rc;
}
4495
4496
/*
 * Dispatch table mapping the generic cifs operations to their SMB 2.0
 * implementations (the smb21/smb30 tables below share most entries).
 */
struct smb_version_operations smb20_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = cifs_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb2_is_read_op,
	.set_oplock_level = smb2_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
4593
Steve French1080ef72011-02-24 18:07:19 +00004594struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004595 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004596 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04004597 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004598 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04004599 .add_credits = smb2_add_credits,
4600 .set_credits = smb2_set_credits,
4601 .get_credits_field = smb2_get_credits_field,
4602 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004603 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004604 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004605 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004606 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004607 .read_data_offset = smb2_read_data_offset,
4608 .read_data_length = smb2_read_data_length,
4609 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004610 .find_mid = smb2_find_mid,
4611 .check_message = smb2_check_message,
4612 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04004613 .clear_stats = smb2_clear_stats,
4614 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004615 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004616 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004617 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04004618 .need_neg = smb2_need_neg,
4619 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07004620 .negotiate_wsize = smb2_negotiate_wsize,
4621 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04004622 .sess_setup = SMB2_sess_setup,
4623 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04004624 .tree_connect = SMB2_tcon,
4625 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004626 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04004627 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04004628 .can_echo = smb2_can_echo,
4629 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04004630 .query_path_info = smb2_query_path_info,
4631 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07004632 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004633 .set_path_size = smb2_set_path_size,
4634 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004635 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004636 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04004637 .mkdir = smb2_mkdir,
4638 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04004639 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07004640 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004641 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07004642 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004643 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004644 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004645 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07004646 .open = smb2_open_file,
4647 .set_fid = smb2_set_fid,
4648 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07004649 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004650 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07004651 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004652 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004653 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004654 .query_dir_first = smb2_query_dir_first,
4655 .query_dir_next = smb2_query_dir_next,
4656 .close_dir = smb2_close_dir,
4657 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07004658 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004659 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004660 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07004661 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07004662 .mand_lock = smb2_mand_lock,
4663 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07004664 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07004665 .get_lease_key = smb2_get_lease_key,
4666 .set_lease_key = smb2_set_lease_key,
4667 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06004668 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004669 .is_read_op = smb21_is_read_op,
4670 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004671 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004672 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004673 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004674 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004675 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05004676 .enum_snapshots = smb3_enum_snapshots,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004677 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304678 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004679#ifdef CONFIG_CIFS_XATTR
4680 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004681 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004682#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004683 .get_acl = get_smb2_acl,
4684 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004685 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004686 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004687 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004688 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004689 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004690 .llseek = smb3_llseek,
Steve French38107d42012-12-08 22:08:06 -06004691};
4692
Steve French38107d42012-12-08 22:08:06 -06004693struct smb_version_operations smb30_operations = {
4694 .compare_fids = smb2_compare_fids,
4695 .setup_request = smb2_setup_request,
4696 .setup_async_request = smb2_setup_async_request,
4697 .check_receive = smb2_check_receive,
4698 .add_credits = smb2_add_credits,
4699 .set_credits = smb2_set_credits,
4700 .get_credits_field = smb2_get_credits_field,
4701 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004702 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004703 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06004704 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004705 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06004706 .read_data_offset = smb2_read_data_offset,
4707 .read_data_length = smb2_read_data_length,
4708 .map_error = map_smb2_to_linux_error,
4709 .find_mid = smb2_find_mid,
4710 .check_message = smb2_check_message,
4711 .dump_detail = smb2_dump_detail,
4712 .clear_stats = smb2_clear_stats,
4713 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05004714 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06004715 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004716 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004717 .downgrade_oplock = smb3_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06004718 .need_neg = smb2_need_neg,
4719 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004720 .negotiate_wsize = smb3_negotiate_wsize,
4721 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06004722 .sess_setup = SMB2_sess_setup,
4723 .logoff = SMB2_logoff,
4724 .tree_connect = SMB2_tcon,
4725 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05004726 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06004727 .is_path_accessible = smb2_is_path_accessible,
4728 .can_echo = smb2_can_echo,
4729 .echo = SMB2_echo,
4730 .query_path_info = smb2_query_path_info,
4731 .get_srv_inum = smb2_get_srv_inum,
4732 .query_file_info = smb2_query_file_info,
4733 .set_path_size = smb2_set_path_size,
4734 .set_file_size = smb2_set_file_size,
4735 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004736 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06004737 .mkdir = smb2_mkdir,
4738 .mkdir_setinfo = smb2_mkdir_setinfo,
4739 .rmdir = smb2_rmdir,
4740 .unlink = smb2_unlink,
4741 .rename = smb2_rename_path,
4742 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004743 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004744 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004745 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06004746 .open = smb2_open_file,
4747 .set_fid = smb2_set_fid,
4748 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06004749 .close_getattr = smb2_close_getattr,
Steve French38107d42012-12-08 22:08:06 -06004750 .flush = smb2_flush_file,
4751 .async_readv = smb2_async_readv,
4752 .async_writev = smb2_async_writev,
4753 .sync_read = smb2_sync_read,
4754 .sync_write = smb2_sync_write,
4755 .query_dir_first = smb2_query_dir_first,
4756 .query_dir_next = smb2_query_dir_next,
4757 .close_dir = smb2_close_dir,
4758 .calc_smb_size = smb2_calc_size,
4759 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004760 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06004761 .oplock_response = smb2_oplock_response,
4762 .queryfs = smb2_queryfs,
4763 .mand_lock = smb2_mand_lock,
4764 .mand_unlock_range = smb2_unlock_range,
4765 .push_mand_locks = smb2_push_mandatory_locks,
4766 .get_lease_key = smb2_get_lease_key,
4767 .set_lease_key = smb2_set_lease_key,
4768 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004769 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06004770 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004771 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004772 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004773 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004774 .create_lease_buf = smb3_create_lease_buf,
4775 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004776 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05004777 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06004778 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004779 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004780 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05004781 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004782 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004783 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004784 .is_transform_hdr = smb3_is_transform_hdr,
4785 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004786 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304787 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004788#ifdef CONFIG_CIFS_XATTR
4789 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004790 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004791#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004792 .get_acl = get_smb2_acl,
4793 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004794 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004795 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004796 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004797 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004798 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004799 .llseek = smb3_llseek,
Steve French1080ef72011-02-24 18:07:19 +00004800};
4801
Steve Frenchaab18932015-06-23 23:37:11 -05004802struct smb_version_operations smb311_operations = {
4803 .compare_fids = smb2_compare_fids,
4804 .setup_request = smb2_setup_request,
4805 .setup_async_request = smb2_setup_async_request,
4806 .check_receive = smb2_check_receive,
4807 .add_credits = smb2_add_credits,
4808 .set_credits = smb2_set_credits,
4809 .get_credits_field = smb2_get_credits_field,
4810 .get_credits = smb2_get_credits,
4811 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004812 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05004813 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004814 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05004815 .read_data_offset = smb2_read_data_offset,
4816 .read_data_length = smb2_read_data_length,
4817 .map_error = map_smb2_to_linux_error,
4818 .find_mid = smb2_find_mid,
4819 .check_message = smb2_check_message,
4820 .dump_detail = smb2_dump_detail,
4821 .clear_stats = smb2_clear_stats,
4822 .print_stats = smb2_print_stats,
4823 .dump_share_caps = smb2_dump_share_caps,
4824 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004825 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004826 .downgrade_oplock = smb3_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05004827 .need_neg = smb2_need_neg,
4828 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004829 .negotiate_wsize = smb3_negotiate_wsize,
4830 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05004831 .sess_setup = SMB2_sess_setup,
4832 .logoff = SMB2_logoff,
4833 .tree_connect = SMB2_tcon,
4834 .tree_disconnect = SMB2_tdis,
4835 .qfs_tcon = smb3_qfs_tcon,
4836 .is_path_accessible = smb2_is_path_accessible,
4837 .can_echo = smb2_can_echo,
4838 .echo = SMB2_echo,
4839 .query_path_info = smb2_query_path_info,
4840 .get_srv_inum = smb2_get_srv_inum,
4841 .query_file_info = smb2_query_file_info,
4842 .set_path_size = smb2_set_path_size,
4843 .set_file_size = smb2_set_file_size,
4844 .set_file_info = smb2_set_file_info,
4845 .set_compression = smb2_set_compression,
4846 .mkdir = smb2_mkdir,
4847 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05004848 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05004849 .rmdir = smb2_rmdir,
4850 .unlink = smb2_unlink,
4851 .rename = smb2_rename_path,
4852 .create_hardlink = smb2_create_hardlink,
4853 .query_symlink = smb2_query_symlink,
4854 .query_mf_symlink = smb3_query_mf_symlink,
4855 .create_mf_symlink = smb3_create_mf_symlink,
4856 .open = smb2_open_file,
4857 .set_fid = smb2_set_fid,
4858 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06004859 .close_getattr = smb2_close_getattr,
Steve Frenchaab18932015-06-23 23:37:11 -05004860 .flush = smb2_flush_file,
4861 .async_readv = smb2_async_readv,
4862 .async_writev = smb2_async_writev,
4863 .sync_read = smb2_sync_read,
4864 .sync_write = smb2_sync_write,
4865 .query_dir_first = smb2_query_dir_first,
4866 .query_dir_next = smb2_query_dir_next,
4867 .close_dir = smb2_close_dir,
4868 .calc_smb_size = smb2_calc_size,
4869 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004870 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05004871 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05004872 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05004873 .mand_lock = smb2_mand_lock,
4874 .mand_unlock_range = smb2_unlock_range,
4875 .push_mand_locks = smb2_push_mandatory_locks,
4876 .get_lease_key = smb2_get_lease_key,
4877 .set_lease_key = smb2_set_lease_key,
4878 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004879 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05004880 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004881 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05004882 .is_read_op = smb21_is_read_op,
4883 .set_oplock_level = smb3_set_oplock_level,
4884 .create_lease_buf = smb3_create_lease_buf,
4885 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004886 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07004887 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05004888/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
4889 .wp_retry_size = smb2_wp_retry_size,
4890 .dir_needs_close = smb2_dir_needs_close,
4891 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004892 .enum_snapshots = smb3_enum_snapshots,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004893 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004894 .is_transform_hdr = smb3_is_transform_hdr,
4895 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004896 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304897 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004898#ifdef CONFIG_CIFS_XATTR
4899 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004900 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004901#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10004902 .get_acl = get_smb2_acl,
4903 .get_acl_by_fid = get_smb2_acl_by_fid,
4904 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004905 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004906 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004907 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004908 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004909 .llseek = smb3_llseek,
Steve Frenchaab18932015-06-23 23:37:11 -05004910};
Steve Frenchaab18932015-06-23 23:37:11 -05004911
Steve Frenchdd446b12012-11-28 23:21:06 -06004912struct smb_version_values smb20_values = {
4913 .version_string = SMB20_VERSION_STRING,
4914 .protocol_id = SMB20_PROT_ID,
4915 .req_capabilities = 0, /* MBZ */
4916 .large_lock_type = 0,
4917 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4918 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4919 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004920 .header_size = sizeof(struct smb2_sync_hdr),
4921 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06004922 .max_header_size = MAX_SMB2_HDR_SIZE,
4923 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4924 .lock_cmd = SMB2_LOCK,
4925 .cap_unix = 0,
4926 .cap_nt_find = SMB2_NT_FIND,
4927 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004928 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4929 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004930 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06004931};
4932
Steve French1080ef72011-02-24 18:07:19 +00004933struct smb_version_values smb21_values = {
4934 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004935 .protocol_id = SMB21_PROT_ID,
4936 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
4937 .large_lock_type = 0,
4938 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4939 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4940 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004941 .header_size = sizeof(struct smb2_sync_hdr),
4942 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05004943 .max_header_size = MAX_SMB2_HDR_SIZE,
4944 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4945 .lock_cmd = SMB2_LOCK,
4946 .cap_unix = 0,
4947 .cap_nt_find = SMB2_NT_FIND,
4948 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04004949 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4950 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004951 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05004952};
4953
Steve French9764c022017-09-17 10:41:35 -05004954struct smb_version_values smb3any_values = {
4955 .version_string = SMB3ANY_VERSION_STRING,
4956 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004957 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004958 .large_lock_type = 0,
4959 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4960 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4961 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004962 .header_size = sizeof(struct smb2_sync_hdr),
4963 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004964 .max_header_size = MAX_SMB2_HDR_SIZE,
4965 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4966 .lock_cmd = SMB2_LOCK,
4967 .cap_unix = 0,
4968 .cap_nt_find = SMB2_NT_FIND,
4969 .cap_large_files = SMB2_LARGE_FILES,
4970 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4971 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4972 .create_lease_size = sizeof(struct create_lease_v2),
4973};
4974
4975struct smb_version_values smbdefault_values = {
4976 .version_string = SMBDEFAULT_VERSION_STRING,
4977 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05004978 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05004979 .large_lock_type = 0,
4980 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4981 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4982 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004983 .header_size = sizeof(struct smb2_sync_hdr),
4984 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05004985 .max_header_size = MAX_SMB2_HDR_SIZE,
4986 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4987 .lock_cmd = SMB2_LOCK,
4988 .cap_unix = 0,
4989 .cap_nt_find = SMB2_NT_FIND,
4990 .cap_large_files = SMB2_LARGE_FILES,
4991 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4992 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
4993 .create_lease_size = sizeof(struct create_lease_v2),
4994};
4995
Steve Frenche4aa25e2012-10-01 12:26:22 -05004996struct smb_version_values smb30_values = {
4997 .version_string = SMB30_VERSION_STRING,
4998 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05004999 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07005000 .large_lock_type = 0,
5001 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5002 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5003 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005004 .header_size = sizeof(struct smb2_sync_hdr),
5005 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04005006 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07005007 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005008 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04005009 .cap_unix = 0,
5010 .cap_nt_find = SMB2_NT_FIND,
5011 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005012 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5013 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04005014 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00005015};
Steve French20b6d8b2013-06-12 22:48:41 -05005016
5017struct smb_version_values smb302_values = {
5018 .version_string = SMB302_VERSION_STRING,
5019 .protocol_id = SMB302_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05005020 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French20b6d8b2013-06-12 22:48:41 -05005021 .large_lock_type = 0,
5022 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5023 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5024 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005025 .header_size = sizeof(struct smb2_sync_hdr),
5026 .header_preamble_size = 0,
Steve French20b6d8b2013-06-12 22:48:41 -05005027 .max_header_size = MAX_SMB2_HDR_SIZE,
5028 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5029 .lock_cmd = SMB2_LOCK,
5030 .cap_unix = 0,
5031 .cap_nt_find = SMB2_NT_FIND,
5032 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005033 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5034 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04005035 .create_lease_size = sizeof(struct create_lease_v2),
Steve French20b6d8b2013-06-12 22:48:41 -05005036};
Steve French5f7fbf72014-12-17 22:52:58 -06005037
Steve French5f7fbf72014-12-17 22:52:58 -06005038struct smb_version_values smb311_values = {
5039 .version_string = SMB311_VERSION_STRING,
5040 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05005041 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06005042 .large_lock_type = 0,
5043 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5044 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5045 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005046 .header_size = sizeof(struct smb2_sync_hdr),
5047 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06005048 .max_header_size = MAX_SMB2_HDR_SIZE,
5049 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5050 .lock_cmd = SMB2_LOCK,
5051 .cap_unix = 0,
5052 .cap_nt_find = SMB2_NT_FIND,
5053 .cap_large_files = SMB2_LARGE_FILES,
5054 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5055 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5056 .create_lease_size = sizeof(struct create_lease_v2),
5057};