blob: 3dddd20c5e2bd17aff56d4cac1fb12d830c86bcc [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Aurelien Aptel35adffe2019-09-20 06:29:39 +020013#include <linux/sort.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070014#include <crypto/aead.h>
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +100015#include "cifsfs.h"
Steve French1080ef72011-02-24 18:07:19 +000016#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040017#include "smb2pdu.h"
18#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040019#include "cifsproto.h"
20#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040021#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070022#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070023#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050024#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070025#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040026
Pavel Shilovskyef68e832019-01-18 17:25:36 -080027/* Change credits for different ops and return the total number of credits */
/*
 * Rebalance the server's global credit pool across the echo and oplock
 * reserves and decide which optional features (echoes, oplocks) can stay
 * enabled given how many credits the server has granted.
 *
 * Caller must hold server->req_lock.
 *
 * Returns the total number of credits (pool + both reserves), or 0 when
 * the server has granted no credits at all (callers treat that as a
 * client/server bug).
 */
static int
change_conf(struct TCP_Server_Info *server)
{
	/* fold the per-op reserves back into the pool before redistributing */
	server->credits += server->echo_credits + server->oplock_credits;
	server->oplock_credits = server->echo_credits = 0;
	switch (server->credits) {
	case 0:
		/* nothing left to hand out */
		return 0;
	case 1:
		/* one credit: keep it for regular requests only */
		server->echoes = false;
		server->oplocks = false;
		break;
	case 2:
		/* two credits: reserve one for echo, none for oplock breaks */
		server->echoes = true;
		server->oplocks = false;
		server->echo_credits = 1;
		break;
	default:
		/* enough credits: reserve one each for echo and (optionally) oplocks */
		server->echoes = true;
		if (enable_oplocks) {
			server->oplocks = true;
			server->oplock_credits = 1;
		} else
			server->oplocks = false;

		server->echo_credits = 1;
	}
	/* the main pool excludes whatever was just reserved */
	server->credits -= server->echo_credits + server->oplock_credits;
	return server->credits + server->echo_credits + server->oplock_credits;
}
58
/*
 * Return @credits->value credits (granted in a server response) to the
 * appropriate pool for @optype, decrement the in-flight count, and wake
 * any waiters.  Credits carrying a stale reconnect instance are dropped
 * rather than added, since they belong to a previous connection.
 */
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;	/* -1: change_conf() was not run */
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	/* instance == 0 means the caller did not track an instance */
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			add, instance);

	/* no point logging credit state for a dead/reconnecting connection */
	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}
126
127static void
128smb2_set_credits(struct TCP_Server_Info *server, const int val)
129{
130 spin_lock(&server->req_lock);
131 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500132 if (val == 1)
133 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400134 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500135 /* don't log while holding the lock */
136 if (val == 1)
137 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400138}
139
140static int *
141smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
142{
143 switch (optype) {
144 case CIFS_ECHO_OP:
145 return &server->echo_credits;
146 case CIFS_OBREAK_OP:
147 return &server->oplock_credits;
148 default:
149 return &server->credits;
150 }
151}
152
153static unsigned int
154smb2_get_credits(struct mid_q_entry *mid)
155{
Pavel Shilovsky86a79642019-11-21 11:35:13 -0800156 return mid->credits_received;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400157}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400158
/*
 * Reserve credits for a large (multi-credit / MTU) read or write of up to
 * @size bytes.  Sleeps (killably) until at least one credit is available.
 * On success *@num is the I/O size the caller may issue and @credits
 * records how many credits were charged and under which reconnect
 * instance; credits->value == 0 means nothing was actually reserved
 * (the fallback single-buffer path).
 *
 * Returns 0 on success, -ERESTARTSYS if the wait was killed, or -ENOENT
 * if the connection is being torn down.
 */
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			/* drop the lock to sleep until credits are returned */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				/* too few to carve up: use one small buffer, charge nothing */
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			/* one credit per SMB2_MAX_BUFFER_SIZE chunk, rounded up */
			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			if (server->in_flight > server->max_in_flight)
				server->max_in_flight = server->in_flight;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}
210
/*
 * Trim a previous MTU credit reservation down to what @payload_size
 * actually needs, returning the surplus to the server's pool and waking
 * waiters.  A reservation made against an older reconnect instance cannot
 * be returned and yields -EAGAIN so the caller retries the operation.
 *
 * Returns 0 on success (including the no-op cases), -ENOTSUPP if the
 * reservation is smaller than required (should never happen - warns once),
 * or -EAGAIN on a reconnect-instance mismatch.
 */
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);

	/* nothing reserved, or reservation already exact */
	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		WARN_ONCE(1, "request has less credits (%d) than required (%d)",
			  credits->value, new_val);
		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		spin_unlock(&server->req_lock);
		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
			 credits->value - new_val);
		return -EAGAIN;
	}

	/* give the surplus back and let any waiters grab it */
	server->credits += credits->value - new_val;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
	credits->value = new_val;
	return 0;
}
242
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400243static __u64
244smb2_get_next_mid(struct TCP_Server_Info *server)
245{
246 __u64 mid;
247 /* for SMB2 we need the current value */
248 spin_lock(&GlobalMid_Lock);
249 mid = server->CurrentMid++;
250 spin_unlock(&GlobalMid_Lock);
251 return mid;
252}
Steve French1080ef72011-02-24 18:07:19 +0000253
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800254static void
255smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
256{
257 spin_lock(&GlobalMid_Lock);
258 if (server->CurrentMid >= val)
259 server->CurrentMid -= val;
260 spin_unlock(&GlobalMid_Lock);
261}
262
/*
 * Find the in-flight mid_q_entry matching a received SMB2 frame by
 * MessageId, state and command, taking a reference on it for the caller.
 * Encrypted (transform) frames are rejected here and yield NULL, as do
 * frames with no matching pending request.
 */
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			/* reference is dropped by the caller when done with the mid */
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}
288
/*
 * Debug helper: dump the key header fields and length of an SMB2 frame.
 * Compiled to a no-op unless CONFIG_CIFS_DEBUG2 is enabled.
 */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}
302
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400303static bool
304smb2_need_neg(struct TCP_Server_Info *server)
305{
306 return server->max_read == 0;
307}
308
309static int
310smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
311{
312 int rc;
Christoph Probsta205d502019-05-08 21:36:25 +0200313
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +0200314 cifs_ses_server(ses)->CurrentMid = 0;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400315 rc = SMB2_negotiate(xid, ses);
316 /* BB we probably don't need to retry with modern servers */
317 if (rc == -EAGAIN)
318 rc = -EHOSTDOWN;
319 return rc;
320}
321
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700322static unsigned int
323smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
324{
325 struct TCP_Server_Info *server = tcon->ses->server;
326 unsigned int wsize;
327
328 /* start with specified wsize, or default */
329 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
330 wsize = min_t(unsigned int, wsize, server->max_write);
Long Li09902f82017-11-22 17:38:39 -0700331#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700332 if (server->rdma) {
333 if (server->sign)
334 wsize = min_t(unsigned int,
335 wsize, server->smbd_conn->max_fragmented_send_size);
336 else
337 wsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700338 wsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700339 }
Long Li09902f82017-11-22 17:38:39 -0700340#endif
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400341 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
342 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700343
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700344 return wsize;
345}
346
347static unsigned int
Steve French3d621232018-09-25 15:33:47 -0500348smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
349{
350 struct TCP_Server_Info *server = tcon->ses->server;
351 unsigned int wsize;
352
353 /* start with specified wsize, or default */
354 wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
355 wsize = min_t(unsigned int, wsize, server->max_write);
356#ifdef CONFIG_CIFS_SMB_DIRECT
357 if (server->rdma) {
358 if (server->sign)
359 wsize = min_t(unsigned int,
360 wsize, server->smbd_conn->max_fragmented_send_size);
361 else
362 wsize = min_t(unsigned int,
363 wsize, server->smbd_conn->max_readwrite_size);
364 }
365#endif
366 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
367 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
368
369 return wsize;
370}
371
372static unsigned int
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700373smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
374{
375 struct TCP_Server_Info *server = tcon->ses->server;
376 unsigned int rsize;
377
378 /* start with specified rsize, or default */
379 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
380 rsize = min_t(unsigned int, rsize, server->max_read);
Long Li09902f82017-11-22 17:38:39 -0700381#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700382 if (server->rdma) {
383 if (server->sign)
384 rsize = min_t(unsigned int,
385 rsize, server->smbd_conn->max_fragmented_recv_size);
386 else
387 rsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700388 rsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700389 }
Long Li09902f82017-11-22 17:38:39 -0700390#endif
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400391
392 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
393 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700394
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700395 return rsize;
396}
397
Steve French3d621232018-09-25 15:33:47 -0500398static unsigned int
399smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
400{
401 struct TCP_Server_Info *server = tcon->ses->server;
402 unsigned int rsize;
403
404 /* start with specified rsize, or default */
405 rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
406 rsize = min_t(unsigned int, rsize, server->max_read);
407#ifdef CONFIG_CIFS_SMB_DIRECT
408 if (server->rdma) {
409 if (server->sign)
410 rsize = min_t(unsigned int,
411 rsize, server->smbd_conn->max_fragmented_recv_size);
412 else
413 rsize = min_t(unsigned int,
414 rsize, server->smbd_conn->max_readwrite_size);
415 }
416#endif
417
418 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
419 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
420
421 return rsize;
422}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200423
424static int
425parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
426 size_t buf_len,
427 struct cifs_server_iface **iface_list,
428 size_t *iface_count)
429{
430 struct network_interface_info_ioctl_rsp *p;
431 struct sockaddr_in *addr4;
432 struct sockaddr_in6 *addr6;
433 struct iface_info_ipv4 *p4;
434 struct iface_info_ipv6 *p6;
435 struct cifs_server_iface *info;
436 ssize_t bytes_left;
437 size_t next = 0;
438 int nb_iface = 0;
439 int rc = 0;
440
441 *iface_list = NULL;
442 *iface_count = 0;
443
444 /*
445 * Fist pass: count and sanity check
446 */
447
448 bytes_left = buf_len;
449 p = buf;
450 while (bytes_left >= sizeof(*p)) {
451 nb_iface++;
452 next = le32_to_cpu(p->Next);
453 if (!next) {
454 bytes_left -= sizeof(*p);
455 break;
456 }
457 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
458 bytes_left -= next;
459 }
460
461 if (!nb_iface) {
462 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
463 rc = -EINVAL;
464 goto out;
465 }
466
467 if (bytes_left || p->Next)
468 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
469
470
471 /*
472 * Second pass: extract info to internal structure
473 */
474
475 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
476 if (!*iface_list) {
477 rc = -ENOMEM;
478 goto out;
479 }
480
481 info = *iface_list;
482 bytes_left = buf_len;
483 p = buf;
484 while (bytes_left >= sizeof(*p)) {
485 info->speed = le64_to_cpu(p->LinkSpeed);
486 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
487 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
488
489 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
490 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
491 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
492 le32_to_cpu(p->Capability));
493
494 switch (p->Family) {
495 /*
496 * The kernel and wire socket structures have the same
497 * layout and use network byte order but make the
498 * conversion explicit in case either one changes.
499 */
500 case INTERNETWORK:
501 addr4 = (struct sockaddr_in *)&info->sockaddr;
502 p4 = (struct iface_info_ipv4 *)p->Buffer;
503 addr4->sin_family = AF_INET;
504 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
505
506 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
507 addr4->sin_port = cpu_to_be16(CIFS_PORT);
508
509 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
510 &addr4->sin_addr);
511 break;
512 case INTERNETWORKV6:
513 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
514 p6 = (struct iface_info_ipv6 *)p->Buffer;
515 addr6->sin6_family = AF_INET6;
516 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
517
518 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
519 addr6->sin6_flowinfo = 0;
520 addr6->sin6_scope_id = 0;
521 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
522
523 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
524 &addr6->sin6_addr);
525 break;
526 default:
527 cifs_dbg(VFS,
528 "%s: skipping unsupported socket family\n",
529 __func__);
530 goto next_iface;
531 }
532
533 (*iface_count)++;
534 info++;
535next_iface:
536 next = le32_to_cpu(p->Next);
537 if (!next)
538 break;
539 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
540 bytes_left -= next;
541 }
542
543 if (!*iface_count) {
544 rc = -EINVAL;
545 goto out;
546 }
547
548out:
549 if (rc) {
550 kfree(*iface_list);
551 *iface_count = 0;
552 *iface_list = NULL;
553 }
554 return rc;
555}
556
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200557static int compare_iface(const void *ia, const void *ib)
558{
559 const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
560 const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
561
562 return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
563}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200564
/*
 * Ask the server for its network interfaces via
 * FSCTL_QUERY_NETWORK_INTERFACE_INFO and replace the session's cached
 * interface list with the parsed result, sorted fastest first.
 *
 * Returns 0 on success or a negative errno (a server that does not
 * support the ioctl yields -EOPNOTSUPP, logged at FYI level only).
 */
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	/* on success, iface_list is a fresh allocation we take ownership of */
	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	/* sort interfaces from fastest to slowest */
	sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);

	/* publish the new list atomically with respect to readers */
	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500607
/*
 * kref release callback for the cached root handle (struct cached_fid).
 * Closes the cached handle on the server and marks all cached state
 * (fid, file_all_info, lease) stale.  Callers drop references via
 * kref_put() while holding cfid->fid_mutex.
 */
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
		cfid->has_lease = false;
	}
}
623
/*
 * Drop one reference on the cached share-root handle; the last put
 * closes the handle on the server (see smb2_close_cached_fid).
 */
void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}
630
/*
 * Drop the reference held on behalf of the lease on the cached root
 * handle, if any.  Caller must hold cfid->fid_mutex (see the unlocked
 * wrapper close_shroot_lease()).
 */
void close_shroot_lease_locked(struct cached_fid *cfid)
{
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}
638
/* Locking wrapper around close_shroot_lease_locked(). */
void close_shroot_lease(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	close_shroot_lease_locked(cfid);
	mutex_unlock(&cfid->fid_mutex);
}
645
/*
 * Work item run when the server breaks the lease on the cached root
 * handle: drop the lease reference so the handle can be closed.
 */
void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, lease_break);

	close_shroot_lease(cfid);
}
654
/*
 * Open the directory at the root of a share and cache the handle in
 * tcon->crfid so later operations on the share root can reuse it.
 *
 * On success *pfid describes the cached root handle and the caller holds
 * a reference on tcon->crfid (released via close_shroot()).  Uses a
 * compounded open + query-info request; the open is performed with
 * crfid.fid_mutex dropped (see comment below), so the cache is
 * re-checked after the request completes.
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	/* fast path: hand out the already-cached handle */
	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* compounded query of FILE_ALL_INFORMATION on the handle just opened */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc) {
		if (rc == -EREMCHG) {
			/* share was deleted/recreated on the server */
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	/* publish the new handle in the cache; one ref for the caller */
	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/* second ref is held by the lease until it is broken */
		kref_get(&tcon->crfid.refcount);
		tcon->crfid.has_lease = true;
		smb2_parse_contexts(server, o_rsp,
				&oparms.fid->epoch,
				oparms.fid->lease_key, &oplock, NULL);
	} else
		goto oshr_exit;

	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = true;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
820
Steve French34f62642013-10-09 02:07:00 -0500821static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200822smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
823 struct cifs_sb_info *cifs_sb)
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500824{
825 int rc;
826 __le16 srch_path = 0; /* Null - open root of share */
827 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
828 struct cifs_open_parms oparms;
829 struct cifs_fid fid;
Steve French3d4ef9a2018-04-25 22:19:09 -0500830 bool no_cached_open = tcon->nohandlecache;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500831
832 oparms.tcon = tcon;
833 oparms.desired_access = FILE_READ_ATTRIBUTES;
834 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200835 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500836 oparms.fid = &fid;
837 oparms.reconnect = false;
838
Steve French3d4ef9a2018-04-25 22:19:09 -0500839 if (no_cached_open)
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000840 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
841 NULL);
Steve French3d4ef9a2018-04-25 22:19:09 -0500842 else
Amir Goldstein0f060932020-02-03 21:46:43 +0200843 rc = open_shroot(xid, tcon, cifs_sb, &fid);
Steve French3d4ef9a2018-04-25 22:19:09 -0500844
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500845 if (rc)
846 return;
847
Steve Frenchc481e9f2013-10-14 01:21:53 -0500848 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500849
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500850 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
851 FS_ATTRIBUTE_INFORMATION);
852 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
853 FS_DEVICE_INFORMATION);
854 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500855 FS_VOLUME_INFORMATION);
856 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500857 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Steve French3d4ef9a2018-04-25 22:19:09 -0500858 if (no_cached_open)
859 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000860 else
861 close_shroot(&tcon->crfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500862}
863
864static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200865smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
866 struct cifs_sb_info *cifs_sb)
Steve French34f62642013-10-09 02:07:00 -0500867{
868 int rc;
869 __le16 srch_path = 0; /* Null - open root of share */
870 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
871 struct cifs_open_parms oparms;
872 struct cifs_fid fid;
873
874 oparms.tcon = tcon;
875 oparms.desired_access = FILE_READ_ATTRIBUTES;
876 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200877 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French34f62642013-10-09 02:07:00 -0500878 oparms.fid = &fid;
879 oparms.reconnect = false;
880
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000881 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500882 if (rc)
883 return;
884
Steven French21671142013-10-09 13:36:35 -0500885 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
886 FS_ATTRIBUTE_INFORMATION);
887 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
888 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500889 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -0500890}
891
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400892static int
893smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
894 struct cifs_sb_info *cifs_sb, const char *full_path)
895{
896 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400897 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700898 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400899 struct cifs_open_parms oparms;
900 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400901
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000902 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -0500903 return 0;
904
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400905 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
906 if (!utf16_path)
907 return -ENOMEM;
908
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400909 oparms.tcon = tcon;
910 oparms.desired_access = FILE_READ_ATTRIBUTES;
911 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200912 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400913 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400914 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400915
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000916 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400917 if (rc) {
918 kfree(utf16_path);
919 return rc;
920 }
921
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400922 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400923 kfree(utf16_path);
924 return rc;
925}
926
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +0400927static int
928smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
929 struct cifs_sb_info *cifs_sb, const char *full_path,
930 u64 *uniqueid, FILE_ALL_INFO *data)
931{
932 *uniqueid = le64_to_cpu(data->IndexNumber);
933 return 0;
934}
935
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700936static int
937smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
938 struct cifs_fid *fid, FILE_ALL_INFO *data)
939{
940 int rc;
941 struct smb2_file_all_info *smb2_data;
942
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +0400943 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700944 GFP_KERNEL);
945 if (smb2_data == NULL)
946 return -ENOMEM;
947
948 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
949 smb2_data);
950 if (!rc)
951 move_smb2_info_to_cifs(data, smb2_data);
952 kfree(smb2_data);
953 return rc;
954}
955
Arnd Bergmann1368f152017-09-05 11:24:15 +0200956#ifdef CONFIG_CIFS_XATTR
/*
 * Convert an SMB2 FILE_FULL_EA_INFORMATION list (@src, @src_size bytes)
 * into the form the xattr layer expects in @dst (@dst_size bytes).
 *
 * Two modes:
 *  - @ea_name != NULL (getxattr): copy the value of that one attribute
 *    into @dst and return its length; return the length alone when
 *    @dst_size is 0 (size probe); -ERANGE if @dst is too small;
 *    -ENODATA if the attribute is not present.
 *  - @ea_name == NULL (listxattr): emit each name as "user.<name>\0"
 *    into @dst and return the total bytes produced; when @dst_size is 0
 *    only the required size is accumulated.
 *
 * Returns -EIO if an entry claims to extend past the end of the list.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	/* remember the original size: 0 means "size probe only" */
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		/* name is NUL-terminated; value follows immediately after */
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		/* a zero-length name terminates the list */
		if (name_len == 0)
			break;

		/* 8 = fixed header of struct smb2_file_full_ea_info */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				/* caller only wanted the value length */
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* next_entry_offset of 0 marks the last entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1039
/*
 * Read extended attributes for @path via a compounded
 * open/query-info/close request, then convert the returned
 * FILE_FULL_EA_INFORMATION list with move_smb2_ea_to_cifs().
 *
 * @ea_name == NULL means listxattr (all names); otherwise the value of
 * the single named attribute is fetched.  Returns the byte count from
 * move_smb2_ea_to_cifs(), 0 for an EA-less file on listxattr, or a
 * negative errno.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* response size limit leaves room for create + close responses */
	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	/* make sure the advertised output buffer lies within the response */
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
			le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1094
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001095
1096static int
1097smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1098 const char *path, const char *ea_name, const void *ea_value,
1099 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1100 struct cifs_sb_info *cifs_sb)
1101{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001102 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001103 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001104 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001105 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001106 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001107 struct smb_rqst rqst[3];
1108 int resp_buftype[3];
1109 struct kvec rsp_iov[3];
1110 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1111 struct cifs_open_parms oparms;
1112 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1113 struct cifs_fid fid;
1114 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1115 unsigned int size[1];
1116 void *data[1];
1117 struct smb2_file_full_ea_info *ea = NULL;
1118 struct kvec close_iov[1];
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001119 struct smb2_query_info_rsp *rsp;
1120 int rc, used_len = 0;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001121
1122 if (smb3_encryption_required(tcon))
1123 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001124
1125 if (ea_name_len > 255)
1126 return -EINVAL;
1127
1128 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1129 if (!utf16_path)
1130 return -ENOMEM;
1131
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001132 memset(rqst, 0, sizeof(rqst));
1133 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1134 memset(rsp_iov, 0, sizeof(rsp_iov));
1135
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001136 if (ses->server->ops->query_all_EAs) {
1137 if (!ea_value) {
1138 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1139 ea_name, NULL, 0,
1140 cifs_sb);
1141 if (rc == -ENODATA)
1142 goto sea_exit;
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001143 } else {
1144 /* If we are adding a attribute we should first check
1145 * if there will be enough space available to store
1146 * the new EA. If not we should not add it since we
1147 * would not be able to even read the EAs back.
1148 */
1149 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1150 FILE_READ_EA,
1151 FILE_FULL_EA_INFORMATION,
1152 SMB2_O_INFO_FILE,
1153 CIFSMaxBufSize -
1154 MAX_SMB2_CREATE_RESPONSE_SIZE -
1155 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1156 &rsp_iov[1], &resp_buftype[1], cifs_sb);
1157 if (rc == 0) {
1158 rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1159 used_len = le32_to_cpu(rsp->OutputBufferLength);
1160 }
1161 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1162 resp_buftype[1] = CIFS_NO_BUFFER;
1163 memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1164 rc = 0;
1165
1166 /* Use a fudge factor of 256 bytes in case we collide
1167 * with a different set_EAs command.
1168 */
1169 if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1170 MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1171 used_len + ea_name_len + ea_value_len + 1) {
1172 rc = -ENOSPC;
1173 goto sea_exit;
1174 }
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001175 }
1176 }
1177
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001178 /* Open */
1179 memset(&open_iov, 0, sizeof(open_iov));
1180 rqst[0].rq_iov = open_iov;
1181 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1182
1183 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001184 oparms.tcon = tcon;
1185 oparms.desired_access = FILE_WRITE_EA;
1186 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001187 oparms.create_options = cifs_create_options(cifs_sb, 0);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001188 oparms.fid = &fid;
1189 oparms.reconnect = false;
1190
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001191 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
1192 if (rc)
1193 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001194 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001195
1196
1197 /* Set Info */
1198 memset(&si_iov, 0, sizeof(si_iov));
1199 rqst[1].rq_iov = si_iov;
1200 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001201
1202 len = sizeof(ea) + ea_name_len + ea_value_len + 1;
1203 ea = kzalloc(len, GFP_KERNEL);
1204 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001205 rc = -ENOMEM;
1206 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001207 }
1208
1209 ea->ea_name_length = ea_name_len;
1210 ea->ea_value_length = cpu_to_le16(ea_value_len);
1211 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1212 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1213
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001214 size[0] = len;
1215 data[0] = ea;
1216
1217 rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
1218 COMPOUND_FID, current->tgid,
1219 FILE_FULL_EA_INFORMATION,
1220 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001221 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001222 smb2_set_related(&rqst[1]);
1223
1224
1225 /* Close */
1226 memset(&close_iov, 0, sizeof(close_iov));
1227 rqst[2].rq_iov = close_iov;
1228 rqst[2].rq_nvec = 1;
Steve French43f8a6a2019-12-02 21:46:54 -06001229 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001230 smb2_set_related(&rqst[2]);
1231
1232 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1233 resp_buftype, rsp_iov);
Steve Frenchd2f15422019-09-22 00:55:46 -05001234 /* no need to bump num_remote_opens because handle immediately closed */
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001235
1236 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001237 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001238 kfree(utf16_path);
1239 SMB2_open_free(&rqst[0]);
1240 SMB2_set_info_free(&rqst[1]);
1241 SMB2_close_free(&rqst[2]);
1242 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1243 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1244 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001245 return rc;
1246}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001247#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001248
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001249static bool
1250smb2_can_echo(struct TCP_Server_Info *server)
1251{
1252 return server->echoes;
1253}
1254
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001255static void
1256smb2_clear_stats(struct cifs_tcon *tcon)
1257{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001258 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001259
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001260 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1261 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1262 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1263 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001264}
1265
1266static void
Steve French769ee6a2013-06-19 14:15:30 -05001267smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1268{
1269 seq_puts(m, "\n\tShare Capabilities:");
1270 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1271 seq_puts(m, " DFS,");
1272 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1273 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1274 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1275 seq_puts(m, " SCALEOUT,");
1276 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1277 seq_puts(m, " CLUSTER,");
1278 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1279 seq_puts(m, " ASYMMETRIC,");
1280 if (tcon->capabilities == 0)
1281 seq_puts(m, " None");
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001282 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1283 seq_puts(m, " Aligned,");
1284 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1285 seq_puts(m, " Partition Aligned,");
1286 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1287 seq_puts(m, " SSD,");
1288 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1289 seq_puts(m, " TRIM-support,");
1290
Steve French769ee6a2013-06-19 14:15:30 -05001291 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
Steve Frenche0386e42018-05-20 01:27:03 -05001292 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001293 if (tcon->perf_sector_size)
1294 seq_printf(m, "\tOptimal sector size: 0x%x",
1295 tcon->perf_sector_size);
Steve Frenche0386e42018-05-20 01:27:03 -05001296 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
Steve French769ee6a2013-06-19 14:15:30 -05001297}
1298
1299static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001300smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1301{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001302 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1303 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001304
1305 /*
1306 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1307 * totals (requests sent) since those SMBs are per-session not per tcon
1308 */
Steve French52ce1ac2018-07-31 01:46:47 -05001309 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1310 (long long)(tcon->bytes_read),
1311 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001312 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1313 atomic_read(&tcon->num_local_opens),
1314 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001315 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001316 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1317 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001318 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001319 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1320 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001321 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001322 atomic_read(&sent[SMB2_CREATE_HE]),
1323 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001324 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001325 atomic_read(&sent[SMB2_CLOSE_HE]),
1326 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001327 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001328 atomic_read(&sent[SMB2_FLUSH_HE]),
1329 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001330 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001331 atomic_read(&sent[SMB2_READ_HE]),
1332 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001333 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001334 atomic_read(&sent[SMB2_WRITE_HE]),
1335 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001336 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001337 atomic_read(&sent[SMB2_LOCK_HE]),
1338 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001339 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001340 atomic_read(&sent[SMB2_IOCTL_HE]),
1341 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001342 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001343 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1344 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001345 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001346 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1347 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001348 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001349 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1350 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001351 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001352 atomic_read(&sent[SMB2_SET_INFO_HE]),
1353 atomic_read(&failed[SMB2_SET_INFO_HE]));
1354 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1355 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1356 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001357}
1358
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001359static void
1360smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1361{
David Howells2b0143b2015-03-17 22:25:59 +00001362 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04001363 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1364
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001365 cfile->fid.persistent_fid = fid->persistent_fid;
1366 cfile->fid.volatile_fid = fid->volatile_fid;
Aurelien Aptel86f740f2020-02-21 11:19:06 +01001367 cfile->fid.access = fid->access;
Steve Frenchdfe33f92018-10-30 19:50:31 -05001368#ifdef CONFIG_CIFS_DEBUG2
1369 cfile->fid.mid = fid->mid;
1370#endif /* CIFS_DEBUG2 */
Pavel Shilovsky42873b02013-09-05 21:30:16 +04001371 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1372 &fid->purge_cache);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001373 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
Aurelien Aptel94f87372016-09-22 07:38:50 +02001374 memcpy(cfile->fid.create_guid, fid->create_guid, 16);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001375}
1376
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001377static void
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001378smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1379 struct cifs_fid *fid)
1380{
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001381 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001382}
1383
/*
 * Close an open handle and, using the attributes the server returns in
 * the close response, refresh the cached inode timestamps and block
 * count without an extra query round trip.  Errors from the close are
 * ignored here since there is nothing the caller could do about them.
 */
static void
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
			  cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	/* mark the cached metadata as freshly revalidated */
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);

	/*
	 * i_blocks is not related to (i_size / i_blksize),
	 * but instead 512 byte (2**9) size is required for
	 * calculating num blocks.
	 */
	/* NOTE(review): only updated when AllocationSize > 4096 —
	 * presumably to skip tiny/resident files; confirm intent. */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
}
1422
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001423static int
Steve French41c13582013-11-14 00:05:36 -06001424SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1425 u64 persistent_fid, u64 volatile_fid,
1426 struct copychunk_ioctl *pcchunk)
1427{
1428 int rc;
1429 unsigned int ret_data_len;
1430 struct resume_key_req *res_key;
1431
1432 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1433 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001434 NULL, 0 /* no input */, CIFSMaxBufSize,
Steve French41c13582013-11-14 00:05:36 -06001435 (char **)&res_key, &ret_data_len);
1436
1437 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001438 cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
Steve French41c13582013-11-14 00:05:36 -06001439 goto req_res_key_exit;
1440 }
1441 if (ret_data_len < sizeof(struct resume_key_req)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001442 cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
Steve French41c13582013-11-14 00:05:36 -06001443 rc = -EINVAL;
1444 goto req_res_key_exit;
1445 }
1446 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1447
1448req_res_key_exit:
1449 kfree(res_key);
1450 return rc;
1451}
1452
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001453static int
1454smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001455 struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02001456 struct cifs_sb_info *cifs_sb,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001457 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001458 unsigned long p)
1459{
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001460 struct cifs_ses *ses = tcon->ses;
1461 char __user *arg = (char __user *)p;
1462 struct smb_query_info qi;
1463 struct smb_query_info __user *pqi;
1464 int rc = 0;
1465 int flags = 0;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001466 struct smb2_query_info_rsp *qi_rsp = NULL;
1467 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001468 void *buffer = NULL;
1469 struct smb_rqst rqst[3];
1470 int resp_buftype[3];
1471 struct kvec rsp_iov[3];
1472 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1473 struct cifs_open_parms oparms;
1474 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1475 struct cifs_fid fid;
1476 struct kvec qi_iov[1];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001477 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001478 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001479 struct kvec close_iov[1];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001480 unsigned int size[2];
1481 void *data[2];
Amir Goldstein0f060932020-02-03 21:46:43 +02001482 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001483
1484 memset(rqst, 0, sizeof(rqst));
1485 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1486 memset(rsp_iov, 0, sizeof(rsp_iov));
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001487
1488 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
1489 return -EFAULT;
1490
1491 if (qi.output_buffer_length > 1024)
1492 return -EINVAL;
1493
1494 if (!ses || !(ses->server))
1495 return -EIO;
1496
1497 if (smb3_encryption_required(tcon))
1498 flags |= CIFS_TRANSFORM_REQ;
1499
Markus Elfringcfaa1182019-11-05 21:30:25 +01001500 buffer = memdup_user(arg + sizeof(struct smb_query_info),
1501 qi.output_buffer_length);
1502 if (IS_ERR(buffer))
1503 return PTR_ERR(buffer);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001504
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001505 /* Open */
1506 memset(&open_iov, 0, sizeof(open_iov));
1507 rqst[0].rq_iov = open_iov;
1508 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001509
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001510 memset(&oparms, 0, sizeof(oparms));
1511 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001512 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001513 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001514 oparms.fid = &fid;
1515 oparms.reconnect = false;
1516
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001517 if (qi.flags & PASSTHRU_FSCTL) {
1518 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1519 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1520 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001521 break;
1522 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1523 oparms.desired_access = GENERIC_ALL;
1524 break;
1525 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1526 oparms.desired_access = GENERIC_READ;
1527 break;
1528 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1529 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001530 break;
1531 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001532 } else if (qi.flags & PASSTHRU_SET_INFO) {
1533 oparms.desired_access = GENERIC_WRITE;
1534 } else {
1535 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001536 }
1537
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001538 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
1539 if (rc)
1540 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001541 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001542
1543 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001544 if (qi.flags & PASSTHRU_FSCTL) {
1545 /* Can eventually relax perm check since server enforces too */
1546 if (!capable(CAP_SYS_ADMIN))
1547 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001548 else {
1549 memset(&io_iov, 0, sizeof(io_iov));
1550 rqst[1].rq_iov = io_iov;
1551 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1552
1553 rc = SMB2_ioctl_init(tcon, &rqst[1],
1554 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001555 qi.info_type, true, buffer,
1556 qi.output_buffer_length,
Ronnie Sahlberg731b82b2020-01-08 13:08:07 +10001557 CIFSMaxBufSize -
1558 MAX_SMB2_CREATE_RESPONSE_SIZE -
1559 MAX_SMB2_CLOSE_RESPONSE_SIZE);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001560 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001561 } else if (qi.flags == PASSTHRU_SET_INFO) {
1562 /* Can eventually relax perm check since server enforces too */
1563 if (!capable(CAP_SYS_ADMIN))
1564 rc = -EPERM;
1565 else {
1566 memset(&si_iov, 0, sizeof(si_iov));
1567 rqst[1].rq_iov = si_iov;
1568 rqst[1].rq_nvec = 1;
1569
1570 size[0] = 8;
1571 data[0] = buffer;
1572
1573 rc = SMB2_set_info_init(tcon, &rqst[1],
1574 COMPOUND_FID, COMPOUND_FID,
1575 current->tgid,
1576 FILE_END_OF_FILE_INFORMATION,
1577 SMB2_O_INFO_FILE, 0, data, size);
1578 }
Steve French31ba4332019-03-13 02:40:07 -05001579 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1580 memset(&qi_iov, 0, sizeof(qi_iov));
1581 rqst[1].rq_iov = qi_iov;
1582 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001583
Steve French31ba4332019-03-13 02:40:07 -05001584 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
1585 COMPOUND_FID, qi.file_info_class,
1586 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001587 qi.input_buffer_length,
1588 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001589 } else { /* unknown flags */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001590 cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001591 rc = -EINVAL;
1592 }
1593
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001594 if (rc)
1595 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001596 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001597 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001598
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001599 /* Close */
1600 memset(&close_iov, 0, sizeof(close_iov));
1601 rqst[2].rq_iov = close_iov;
1602 rqst[2].rq_nvec = 1;
1603
Steve French43f8a6a2019-12-02 21:46:54 -06001604 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001605 if (rc)
1606 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001607 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001608
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001609 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1610 resp_buftype, rsp_iov);
1611 if (rc)
1612 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001613
1614 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001615 if (qi.flags & PASSTHRU_FSCTL) {
1616 pqi = (struct smb_query_info __user *)arg;
1617 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1618 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1619 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001620 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001621 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1622 > rsp_iov[1].iov_len)
1623 goto e_fault;
1624
1625 if (copy_to_user(&pqi->input_buffer_length,
1626 &qi.input_buffer_length,
1627 sizeof(qi.input_buffer_length)))
1628 goto e_fault;
1629
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001630 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1631 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001632 qi.input_buffer_length))
1633 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001634 } else {
1635 pqi = (struct smb_query_info __user *)arg;
1636 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1637 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1638 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001639 if (copy_to_user(&pqi->input_buffer_length,
1640 &qi.input_buffer_length,
1641 sizeof(qi.input_buffer_length)))
1642 goto e_fault;
1643
1644 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1645 qi.input_buffer_length))
1646 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001647 }
1648
1649 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001650 kfree(buffer);
1651 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001652 if (qi.flags & PASSTHRU_FSCTL)
1653 SMB2_ioctl_free(&rqst[1]);
1654 else
1655 SMB2_query_info_free(&rqst[1]);
1656
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001657 SMB2_close_free(&rqst[2]);
1658 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1659 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1660 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001661 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001662
1663e_fault:
1664 rc = -EFAULT;
1665 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001666}
1667
Sachin Prabhu620d8742017-02-10 16:03:51 +05301668static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001669smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001670 struct cifsFileInfo *srcfile,
1671 struct cifsFileInfo *trgtfile, u64 src_off,
1672 u64 len, u64 dest_off)
1673{
1674 int rc;
1675 unsigned int ret_data_len;
1676 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001677 struct copychunk_ioctl_rsp *retbuf = NULL;
1678 struct cifs_tcon *tcon;
1679 int chunks_copied = 0;
1680 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301681 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001682
1683 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1684
1685 if (pcchunk == NULL)
1686 return -ENOMEM;
1687
Christoph Probsta205d502019-05-08 21:36:25 +02001688 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001689 /* Request a key from the server to identify the source of the copy */
1690 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1691 srcfile->fid.persistent_fid,
1692 srcfile->fid.volatile_fid, pcchunk);
1693
1694 /* Note: request_res_key sets res_key null only if rc !=0 */
1695 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001696 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001697
1698 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001699 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001700 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001701 pcchunk->Reserved2 = 0;
1702
Steve French9bf0c9c2013-11-16 18:05:28 -06001703 tcon = tlink_tcon(trgtfile->tlink);
1704
1705 while (len > 0) {
1706 pcchunk->SourceOffset = cpu_to_le64(src_off);
1707 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1708 pcchunk->Length =
1709 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1710
1711 /* Request server copy to target from src identified by key */
1712 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001713 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001714 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001715 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1716 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001717 if (rc == 0) {
1718 if (ret_data_len !=
1719 sizeof(struct copychunk_ioctl_rsp)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001720 cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001721 rc = -EIO;
1722 goto cchunk_out;
1723 }
1724 if (retbuf->TotalBytesWritten == 0) {
1725 cifs_dbg(FYI, "no bytes copied\n");
1726 rc = -EIO;
1727 goto cchunk_out;
1728 }
1729 /*
1730 * Check if server claimed to write more than we asked
1731 */
1732 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1733 le32_to_cpu(pcchunk->Length)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001734 cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001735 rc = -EIO;
1736 goto cchunk_out;
1737 }
1738 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001739 cifs_tcon_dbg(VFS, "invalid num chunks written\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001740 rc = -EIO;
1741 goto cchunk_out;
1742 }
1743 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001744
Sachin Prabhu620d8742017-02-10 16:03:51 +05301745 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1746 src_off += bytes_written;
1747 dest_off += bytes_written;
1748 len -= bytes_written;
1749 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001750
Sachin Prabhu620d8742017-02-10 16:03:51 +05301751 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001752 le32_to_cpu(retbuf->ChunksWritten),
1753 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301754 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001755 } else if (rc == -EINVAL) {
1756 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1757 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001758
Steve French9bf0c9c2013-11-16 18:05:28 -06001759 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1760 le32_to_cpu(retbuf->ChunksWritten),
1761 le32_to_cpu(retbuf->ChunkBytesWritten),
1762 le32_to_cpu(retbuf->TotalBytesWritten));
1763
1764 /*
1765 * Check if this is the first request using these sizes,
1766 * (ie check if copy succeed once with original sizes
1767 * and check if the server gave us different sizes after
1768 * we already updated max sizes on previous request).
1769 * if not then why is the server returning an error now
1770 */
1771 if ((chunks_copied != 0) || chunk_sizes_updated)
1772 goto cchunk_out;
1773
1774 /* Check that server is not asking us to grow size */
1775 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1776 tcon->max_bytes_chunk)
1777 tcon->max_bytes_chunk =
1778 le32_to_cpu(retbuf->ChunkBytesWritten);
1779 else
1780 goto cchunk_out; /* server gave us bogus size */
1781
1782 /* No need to change MaxChunks since already set to 1 */
1783 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001784 } else
1785 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001786 }
1787
1788cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001789 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001790 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301791 if (rc)
1792 return rc;
1793 else
1794 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001795}
1796
1797static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001798smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1799 struct cifs_fid *fid)
1800{
1801 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1802}
1803
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001804static unsigned int
1805smb2_read_data_offset(char *buf)
1806{
1807 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001808
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001809 return rsp->DataOffset;
1810}
1811
1812static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001813smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001814{
1815 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001816
1817 if (in_remaining)
1818 return le32_to_cpu(rsp->DataRemaining);
1819
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001820 return le32_to_cpu(rsp->DataLength);
1821}
1822
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001823
1824static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001825smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001826 struct cifs_io_parms *parms, unsigned int *bytes_read,
1827 char **buf, int *buf_type)
1828{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001829 parms->persistent_fid = pfid->persistent_fid;
1830 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001831 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1832}
1833
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001834static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001835smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001836 struct cifs_io_parms *parms, unsigned int *written,
1837 struct kvec *iov, unsigned long nr_segs)
1838{
1839
Steve Frenchdb8b6312014-09-22 05:13:55 -05001840 parms->persistent_fid = pfid->persistent_fid;
1841 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001842 return SMB2_write(xid, parms, written, iov, nr_segs);
1843}
1844
Steve Frenchd43cc792014-08-13 17:16:29 -05001845/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1846static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1847 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1848{
1849 struct cifsInodeInfo *cifsi;
1850 int rc;
1851
1852 cifsi = CIFS_I(inode);
1853
1854 /* if file already sparse don't bother setting sparse again */
1855 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1856 return true; /* already sparse */
1857
1858 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1859 return true; /* already not sparse */
1860
1861 /*
1862 * Can't check for sparse support on share the usual way via the
1863 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1864 * since Samba server doesn't set the flag on the share, yet
1865 * supports the set sparse FSCTL and returns sparse correctly
1866 * in the file attributes. If we fail setting sparse though we
1867 * mark that server does not support sparse files for this share
1868 * to avoid repeatedly sending the unsupported fsctl to server
1869 * if the file is repeatedly extended.
1870 */
1871 if (tcon->broken_sparse_sup)
1872 return false;
1873
1874 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1875 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001876 true /* is_fctl */,
Steve French153322f2019-03-28 22:32:49 -05001877 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05001878 if (rc) {
1879 tcon->broken_sparse_sup = true;
1880 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1881 return false;
1882 }
1883
1884 if (setsparse)
1885 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1886 else
1887 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1888
1889 return true;
1890}
1891
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001892static int
1893smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1894 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1895{
1896 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05001897 struct inode *inode;
1898
1899 /*
1900 * If extending file more than one page make sparse. Many Linux fs
1901 * make files sparse by default when extending via ftruncate
1902 */
David Howells2b0143b2015-03-17 22:25:59 +00001903 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05001904
1905 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05001906 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05001907
Steve Frenchd43cc792014-08-13 17:16:29 -05001908 /* whether set sparse succeeds or not, extend the file */
1909 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05001910 }
1911
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001912 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10001913 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001914}
1915
Steve French02b16662015-06-27 21:18:36 -07001916static int
1917smb2_duplicate_extents(const unsigned int xid,
1918 struct cifsFileInfo *srcfile,
1919 struct cifsFileInfo *trgtfile, u64 src_off,
1920 u64 len, u64 dest_off)
1921{
1922 int rc;
1923 unsigned int ret_data_len;
Steve French02b16662015-06-27 21:18:36 -07001924 struct duplicate_extents_to_file dup_ext_buf;
1925 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
1926
1927 /* server fileays advertise duplicate extent support with this flag */
1928 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
1929 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
1930 return -EOPNOTSUPP;
1931
1932 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
1933 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
1934 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
1935 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
1936 dup_ext_buf.ByteCount = cpu_to_le64(len);
Christoph Probsta205d502019-05-08 21:36:25 +02001937 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
Steve French02b16662015-06-27 21:18:36 -07001938 src_off, dest_off, len);
1939
1940 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
1941 if (rc)
1942 goto duplicate_extents_out;
1943
1944 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1945 trgtfile->fid.volatile_fid,
1946 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001947 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001948 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07001949 sizeof(struct duplicate_extents_to_file),
Steve French153322f2019-03-28 22:32:49 -05001950 CIFSMaxBufSize, NULL,
Steve French02b16662015-06-27 21:18:36 -07001951 &ret_data_len);
1952
1953 if (ret_data_len > 0)
Christoph Probsta205d502019-05-08 21:36:25 +02001954 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
Steve French02b16662015-06-27 21:18:36 -07001955
1956duplicate_extents_out:
1957 return rc;
1958}
Steve French02b16662015-06-27 21:18:36 -07001959
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001960static int
Steve French64a5cfa2013-10-14 15:31:32 -05001961smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1962 struct cifsFileInfo *cfile)
1963{
1964 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1965 cfile->fid.volatile_fid);
1966}
1967
1968static int
Steve Frenchb3152e22015-06-24 03:17:02 -05001969smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1970 struct cifsFileInfo *cfile)
1971{
1972 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05001973 unsigned int ret_data_len;
1974
1975 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1976 integr_info.Flags = 0;
1977 integr_info.Reserved = 0;
1978
1979 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1980 cfile->fid.volatile_fid,
1981 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001982 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01001983 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05001984 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05001985 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05001986 &ret_data_len);
1987
1988}
1989
Steve Frenche02789a2018-08-09 14:33:12 -05001990/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1991#define GMT_TOKEN_SIZE 50
1992
Steve French153322f2019-03-28 22:32:49 -05001993#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1994
Steve Frenche02789a2018-08-09 14:33:12 -05001995/*
1996 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1997 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
1998 */
Steve Frenchb3152e22015-06-24 03:17:02 -05001999static int
Steve French834170c2016-09-30 21:14:26 -05002000smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2001 struct cifsFileInfo *cfile, void __user *ioc_buf)
2002{
2003 char *retbuf = NULL;
2004 unsigned int ret_data_len = 0;
2005 int rc;
Steve French153322f2019-03-28 22:32:49 -05002006 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05002007 struct smb_snapshot_array snapshot_in;
2008
Steve French973189a2019-04-04 00:41:04 -05002009 /*
2010 * On the first query to enumerate the list of snapshots available
2011 * for this volume the buffer begins with 0 (number of snapshots
2012 * which can be returned is zero since at that point we do not know
2013 * how big the buffer needs to be). On the second query,
2014 * it (ret_data_len) is set to number of snapshots so we can
2015 * know to set the maximum response size larger (see below).
2016 */
Steve French153322f2019-03-28 22:32:49 -05002017 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
2018 return -EFAULT;
2019
2020 /*
2021 * Note that for snapshot queries that servers like Azure expect that
2022 * the first query be minimal size (and just used to get the number/size
2023 * of previous versions) so response size must be specified as EXACTLY
2024 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
2025 * of eight bytes.
2026 */
2027 if (ret_data_len == 0)
2028 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
2029 else
2030 max_response_size = CIFSMaxBufSize;
2031
Steve French834170c2016-09-30 21:14:26 -05002032 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2033 cfile->fid.volatile_fid,
2034 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002035 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002036 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05002037 (char **)&retbuf,
2038 &ret_data_len);
2039 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
2040 rc, ret_data_len);
2041 if (rc)
2042 return rc;
2043
2044 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
2045 /* Fixup buffer */
2046 if (copy_from_user(&snapshot_in, ioc_buf,
2047 sizeof(struct smb_snapshot_array))) {
2048 rc = -EFAULT;
2049 kfree(retbuf);
2050 return rc;
2051 }
Steve French834170c2016-09-30 21:14:26 -05002052
Steve Frenche02789a2018-08-09 14:33:12 -05002053 /*
2054 * Check for min size, ie not large enough to fit even one GMT
2055 * token (snapshot). On the first ioctl some users may pass in
2056 * smaller size (or zero) to simply get the size of the array
2057 * so the user space caller can allocate sufficient memory
2058 * and retry the ioctl again with larger array size sufficient
2059 * to hold all of the snapshot GMT tokens on the second try.
2060 */
2061 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2062 ret_data_len = sizeof(struct smb_snapshot_array);
2063
2064 /*
2065 * We return struct SRV_SNAPSHOT_ARRAY, followed by
2066 * the snapshot array (of 50 byte GMT tokens) each
2067 * representing an available previous version of the data
2068 */
2069 if (ret_data_len > (snapshot_in.snapshot_array_size +
2070 sizeof(struct smb_snapshot_array)))
2071 ret_data_len = snapshot_in.snapshot_array_size +
2072 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05002073
2074 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2075 rc = -EFAULT;
2076 }
2077
2078 kfree(retbuf);
2079 return rc;
2080}
2081
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002082
2083
/*
 * Implement the change-notify ioctl: open the path backing @pfile with
 * FILE_READ_ATTRIBUTES access, issue an SMB2 CHANGE_NOTIFY using the
 * watch_tree flag and completion filter supplied by userspace in
 * @ioc_buf (a struct smb3_notify), then close the handle.
 *
 * Returns 0 on success, -ENOMEM/-EFAULT on setup failures, or the error
 * from the open/notify requests.
 */
static int
smb3_notify(const unsigned int xid, struct file *pfile,
	    void __user *ioc_buf)
{
	struct smb3_notify notify;
	struct dentry *dentry = pfile->f_path.dentry;
	struct inode *inode = file_inode(pfile);
	struct cifs_sb_info *cifs_sb;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct cifs_tcon *tcon;
	unsigned char *path = NULL;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int rc = 0;

	path = build_path_from_dentry(dentry);
	if (path == NULL)
		return -ENOMEM;

	cifs_sb = CIFS_SB(inode->i_sb);

	/* convert path (skipping its first character) to UTF-16 for the wire */
	utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
	if (utf16_path == NULL) {
		rc = -ENOMEM;
		goto notify_exit;
	}

	if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
		rc = -EFAULT;
		goto notify_exit;
	}

	/* open the watched path with minimal (attribute-read) access */
	tcon = cifs_sb_master_tcon(cifs_sb);
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	if (rc)
		goto notify_exit;

	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
				notify.watch_tree, notify.completion_filter);

	/* handle is closed regardless of the notify outcome */
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);

notify_exit:
	kfree(path);
	kfree(utf16_path);
	return rc;
}
2141
Steve French834170c2016-09-30 21:14:26 -05002142static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002143smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2144 const char *path, struct cifs_sb_info *cifs_sb,
2145 struct cifs_fid *fid, __u16 search_flags,
2146 struct cifs_search_info *srch_inf)
2147{
2148 __le16 *utf16_path;
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002149 struct smb_rqst rqst[2];
2150 struct kvec rsp_iov[2];
2151 int resp_buftype[2];
2152 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2153 struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
2154 int rc, flags = 0;
2155 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002156 struct cifs_open_parms oparms;
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002157 struct smb2_query_directory_rsp *qd_rsp = NULL;
2158 struct smb2_create_rsp *op_rsp = NULL;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002159
2160 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2161 if (!utf16_path)
2162 return -ENOMEM;
2163
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002164 if (smb3_encryption_required(tcon))
2165 flags |= CIFS_TRANSFORM_REQ;
2166
2167 memset(rqst, 0, sizeof(rqst));
2168 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
2169 memset(rsp_iov, 0, sizeof(rsp_iov));
2170
2171 /* Open */
2172 memset(&open_iov, 0, sizeof(open_iov));
2173 rqst[0].rq_iov = open_iov;
2174 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2175
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002176 oparms.tcon = tcon;
2177 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2178 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002179 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002180 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002181 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002182
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002183 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
2184 if (rc)
2185 goto qdf_free;
2186 smb2_set_next_command(tcon, &rqst[0]);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002187
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002188 /* Query directory */
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002189 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02002190 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002191
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002192 memset(&qd_iov, 0, sizeof(qd_iov));
2193 rqst[1].rq_iov = qd_iov;
2194 rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
2195
2196 rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
2197 COMPOUND_FID, COMPOUND_FID,
2198 0, srch_inf->info_level);
2199 if (rc)
2200 goto qdf_free;
2201
2202 smb2_set_related(&rqst[1]);
2203
2204 rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
2205 resp_buftype, rsp_iov);
2206
2207 /* If the open failed there is nothing to do */
2208 op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
2209 if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
2210 cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
2211 goto qdf_free;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002212 }
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002213 fid->persistent_fid = op_rsp->PersistentFileId;
2214 fid->volatile_fid = op_rsp->VolatileFileId;
2215
2216 /* Anything else than ENODATA means a genuine error */
2217 if (rc && rc != -ENODATA) {
2218 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2219 cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
2220 trace_smb3_query_dir_err(xid, fid->persistent_fid,
2221 tcon->tid, tcon->ses->Suid, 0, 0, rc);
2222 goto qdf_free;
2223 }
2224
Shyam Prasad N1be1fa42020-03-09 01:35:09 -07002225 atomic_inc(&tcon->num_remote_opens);
2226
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002227 qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
2228 if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
2229 trace_smb3_query_dir_done(xid, fid->persistent_fid,
2230 tcon->tid, tcon->ses->Suid, 0, 0);
2231 srch_inf->endOfSearch = true;
2232 rc = 0;
2233 goto qdf_free;
2234 }
2235
2236 rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
2237 srch_inf);
2238 if (rc) {
2239 trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
2240 tcon->ses->Suid, 0, 0, rc);
2241 goto qdf_free;
2242 }
2243 resp_buftype[1] = CIFS_NO_BUFFER;
2244
2245 trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
2246 tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
2247
2248 qdf_free:
2249 kfree(utf16_path);
2250 SMB2_open_free(&rqst[0]);
2251 SMB2_query_directory_free(&rqst[1]);
2252 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2253 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002254 return rc;
2255}
2256
2257static int
2258smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2259 struct cifs_fid *fid, __u16 search_flags,
2260 struct cifs_search_info *srch_inf)
2261{
2262 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2263 fid->volatile_fid, 0, srch_inf);
2264}
2265
2266static int
2267smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2268 struct cifs_fid *fid)
2269{
2270 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2271}
2272
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002273/*
Christoph Probsta205d502019-05-08 21:36:25 +02002274 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2275 * the number of credits and return true. Otherwise - return false.
2276 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002277static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002278smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002279{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002280 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002281
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002282 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002283 return false;
2284
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002285 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002286 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002287 server->credits += le16_to_cpu(shdr->CreditRequest);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002288 spin_unlock(&server->req_lock);
2289 wake_up(&server->request_q);
2290 }
2291
2292 return true;
2293}
2294
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002295static bool
2296smb2_is_session_expired(char *buf)
2297{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002298 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002299
Mark Symsd81243c2018-05-24 09:47:31 +01002300 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2301 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002302 return false;
2303
Steve Frenche68a9322018-07-30 14:23:58 -05002304 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2305 le16_to_cpu(shdr->Command),
2306 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002307 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002308
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002309 return true;
2310}
2311
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002312static int
2313smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2314 struct cifsInodeInfo *cinode)
2315{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002316 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2317 return SMB2_lease_break(0, tcon, cinode->lease_key,
2318 smb2_get_lease_state(cinode));
2319
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002320 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2321 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002322 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002323}
2324
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002325void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002326smb2_set_related(struct smb_rqst *rqst)
2327{
2328 struct smb2_sync_hdr *shdr;
2329
2330 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002331 if (shdr == NULL) {
2332 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2333 return;
2334 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002335 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2336}
2337
/* Zero bytes used to pad compound requests out to an 8 byte boundary. */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2339
/*
 * Prepare @rqst to be followed by another command in an SMB2 compound:
 * pad the request out to the mandatory 8 byte boundary and record the
 * total (padded) length in the header's NextCommand field so the
 * server can locate the next request in the chain.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	/* Bytes needed to reach the next multiple of 8 (1..7). */
	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 *
		 * NOTE(review): this assumes rq_iov[0]'s buffer is large
		 * enough to hold all trailing iovs plus the padding —
		 * confirm against the request builders.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2396
/*
 * Retrieve information about @utf16_path with a single compound
 * open/query_info/close round trip.
 *
 * @class, @type and @output_len select the SMB2 QUERY_INFO variant
 * (FileInfoClass, InfoType and expected response size).  On success
 * the query info response is passed back to the caller through
 * @rsp/@buftype; the caller must free it with free_rsp_buf().
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Slot 0: open the path */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Slot 1: query info on the handle opened by slot 0 */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	/* COMPOUND_FID makes the server use the FileId from the open */
	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Slot 2: close the handle again */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		/* on error nothing is handed back, so drop the qi buffer */
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			/* share was moved or deleted; force a reconnect */
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto qic_exit;
	}
	/* ownership of the query info response passes to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	/* slot 1's buffer is either freed above or owned by the caller */
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2488
2489static int
2490smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002491 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002492{
2493 struct smb2_query_info_rsp *rsp;
2494 struct smb2_fs_full_size_info *info = NULL;
2495 __le16 utf16_path = 0; /* Null - open root of share */
2496 struct kvec rsp_iov = {NULL, 0};
2497 int buftype = CIFS_NO_BUFFER;
2498 int rc;
2499
2500
2501 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2502 FILE_READ_ATTRIBUTES,
2503 FS_FULL_SIZE_INFORMATION,
2504 SMB2_O_INFO_FILESYSTEM,
2505 sizeof(struct smb2_fs_full_size_info),
Steve French87f93d82020-02-04 13:02:59 -06002506 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002507 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002508 goto qfs_exit;
2509
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002510 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002511 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002512 info = (struct smb2_fs_full_size_info *)(
2513 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2514 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2515 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002516 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002517 sizeof(struct smb2_fs_full_size_info));
2518 if (!rc)
2519 smb2_copy_fs_info_to_kstatfs(info, buf);
2520
2521qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002522 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002523 return rc;
2524}
2525
Steve French2d304212018-06-24 23:28:12 -05002526static int
2527smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002528 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Steve French2d304212018-06-24 23:28:12 -05002529{
2530 int rc;
2531 __le16 srch_path = 0; /* Null - open root of share */
2532 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2533 struct cifs_open_parms oparms;
2534 struct cifs_fid fid;
2535
2536 if (!tcon->posix_extensions)
Amir Goldstein0f060932020-02-03 21:46:43 +02002537 return smb2_queryfs(xid, tcon, cifs_sb, buf);
Steve French2d304212018-06-24 23:28:12 -05002538
2539 oparms.tcon = tcon;
2540 oparms.desired_access = FILE_READ_ATTRIBUTES;
2541 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002542 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French2d304212018-06-24 23:28:12 -05002543 oparms.fid = &fid;
2544 oparms.reconnect = false;
2545
2546 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2547 if (rc)
2548 return rc;
2549
2550 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2551 fid.volatile_fid, buf);
2552 buf->f_type = SMB2_MAGIC_NUMBER;
2553 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2554 return rc;
2555}
Steve French2d304212018-06-24 23:28:12 -05002556
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002557static bool
2558smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2559{
2560 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2561 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2562}
2563
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002564static int
2565smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2566 __u64 length, __u32 type, int lock, int unlock, bool wait)
2567{
2568 if (unlock && !lock)
2569 type = SMB2_LOCKFLAG_UNLOCK;
2570 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2571 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2572 current->tgid, length, offset, type, wait);
2573}
2574
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002575static void
2576smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2577{
2578 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2579}
2580
2581static void
2582smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2583{
2584 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2585}
2586
/* Generate a fresh client-chosen 128-bit lease key for @fid. */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2592
/*
 * Resolve a DFS referral for @search_name on session @ses by issuing
 * an FSCTL_DFS_GET_REFERRALS ioctl, preferably over the IPC tcon.
 * The referral entries are returned through @target_nodes and
 * @num_of_nodes, filled in by parse_dfs_referrals(); the caller owns
 * and frees them.
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		/* no IPC tcon: borrow the first tcon on the session and
		 * take a reference so it cannot go away under us */
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		if (tcon)
			tcon->tc_count++;
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	/* request struct has a flexible-length name field at the end */
	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	/* retry while the server asks us to back off and resend */
	do {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
	} while (rc == -EAGAIN);

	if (rc) {
		if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
			cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002687
2688static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002689parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2690 u32 plen, char **target_path,
2691 struct cifs_sb_info *cifs_sb)
2692{
2693 unsigned int len;
2694
2695 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2696 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2697
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002698 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2699 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2700 le64_to_cpu(symlink_buf->InodeType));
2701 return -EOPNOTSUPP;
2702 }
2703
2704 *target_path = cifs_strndup_from_utf16(
2705 symlink_buf->PathBuffer,
2706 len, true, cifs_sb->local_nls);
2707 if (!(*target_path))
2708 return -ENOMEM;
2709
2710 convert_delimiter(*target_path, '/');
2711 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2712
2713 return 0;
2714}
2715
2716static int
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002717parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
2718 u32 plen, char **target_path,
2719 struct cifs_sb_info *cifs_sb)
2720{
2721 unsigned int sub_len;
2722 unsigned int sub_offset;
2723
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002724 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002725
2726 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
2727 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
2728 if (sub_offset + 20 > plen ||
2729 sub_offset + sub_len + 20 > plen) {
2730 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
2731 return -EIO;
2732 }
2733
2734 *target_path = cifs_strndup_from_utf16(
2735 symlink_buf->PathBuffer + sub_offset,
2736 sub_len, true, cifs_sb->local_nls);
2737 if (!(*target_path))
2738 return -ENOMEM;
2739
2740 convert_delimiter(*target_path, '/');
2741 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2742
2743 return 0;
2744}
2745
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002746static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002747parse_reparse_point(struct reparse_data_buffer *buf,
2748 u32 plen, char **target_path,
2749 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002750{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002751 if (plen < sizeof(struct reparse_data_buffer)) {
2752 cifs_dbg(VFS, "reparse buffer is too small. Must be "
2753 "at least 8 bytes but was %d\n", plen);
2754 return -EIO;
2755 }
2756
2757 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2758 sizeof(struct reparse_data_buffer)) {
2759 cifs_dbg(VFS, "srv returned invalid reparse buf "
2760 "length: %d\n", plen);
2761 return -EIO;
2762 }
2763
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002764 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002765 switch (le32_to_cpu(buf->ReparseTag)) {
2766 case IO_REPARSE_TAG_NFS:
2767 return parse_reparse_posix(
2768 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002769 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002770 case IO_REPARSE_TAG_SYMLINK:
2771 return parse_reparse_symlink(
2772 (struct reparse_symlink_data_buffer *)buf,
2773 plen, target_path, cifs_sb);
2774 default:
2775 cifs_dbg(VFS, "srv returned unknown symlink buffer "
2776 "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
2777 return -EOPNOTSUPP;
2778 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002779}
2780
/*
 * Minimum size of an SMB2 error response that carries a symlink error
 * context: the error response plus the symlink payload.  The "- 1"
 * presumably accounts for smb2_err_rsp's one-byte ErrorData
 * placeholder overlapping the start of the payload — confirm against
 * the struct definition in smb2pdu.h.
 */
#define SMB2_SYMLINK_STRUCT_SIZE \
	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2783
/*
 * Resolve the target of the symlink at @full_path.
 *
 * Sends a compound open / ioctl(FSCTL_GET_REPARSE_POINT) / close.
 * Two cases produce a target:
 *  - @is_reparse_point and the open succeeded (OPEN_REPARSE_POINT was
 *    set): the target is parsed out of the reparse buffer in the
 *    ioctl response;
 *  - the open failed with a symlink error context: the substitute
 *    name is parsed out of the error response.
 * Any other outcome returns -ENOENT/-EINVAL.  On success *target_path
 * is a newly allocated string the caller must free.
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	/* cap the reply so open + ioctl + close responses fit in one buf */
	rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* remember the open response if the server returned an error in it */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->sync_hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* reparse data must lie entirely within the ioctl response */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				 plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	/* no symlink error response to parse -> nothing there */
	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	/* validate the symlink error context before trusting its fields */
	err_buf = err_iov.iov_base;
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	/* both names must fit inside the received error response */
	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	*target_path = cifs_strndup_from_utf16(
			(char *)symlink->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

 querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2967
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002968static struct cifs_ntsd *
2969get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2970 const struct cifs_fid *cifsfid, u32 *pacllen)
2971{
2972 struct cifs_ntsd *pntsd = NULL;
2973 unsigned int xid;
2974 int rc = -EOPNOTSUPP;
2975 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2976
2977 if (IS_ERR(tlink))
2978 return ERR_CAST(tlink);
2979
2980 xid = get_xid();
2981 cifs_dbg(FYI, "trying to get acl\n");
2982
2983 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2984 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2985 free_xid(xid);
2986
2987 cifs_put_tlink(tlink);
2988
2989 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2990 if (rc)
2991 return ERR_PTR(rc);
2992 return pntsd;
2993
2994}
2995
2996static struct cifs_ntsd *
2997get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2998 const char *path, u32 *pacllen)
2999{
3000 struct cifs_ntsd *pntsd = NULL;
3001 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3002 unsigned int xid;
3003 int rc;
3004 struct cifs_tcon *tcon;
3005 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3006 struct cifs_fid fid;
3007 struct cifs_open_parms oparms;
3008 __le16 *utf16_path;
3009
3010 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
3011 if (IS_ERR(tlink))
3012 return ERR_CAST(tlink);
3013
3014 tcon = tlink_tcon(tlink);
3015 xid = get_xid();
3016
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003017 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05003018 if (!utf16_path) {
3019 rc = -ENOMEM;
3020 free_xid(xid);
3021 return ERR_PTR(rc);
3022 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003023
3024 oparms.tcon = tcon;
3025 oparms.desired_access = READ_CONTROL;
3026 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02003027 oparms.create_options = cifs_create_options(cifs_sb, 0);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003028 oparms.fid = &fid;
3029 oparms.reconnect = false;
3030
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10003031 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003032 kfree(utf16_path);
3033 if (!rc) {
3034 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3035 fid.volatile_fid, (void **)&pntsd, pacllen);
3036 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3037 }
3038
3039 cifs_put_tlink(tlink);
3040 free_xid(xid);
3041
3042 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3043 if (rc)
3044 return ERR_PTR(rc);
3045 return pntsd;
3046}
3047
/*
 * Write (part of) the security descriptor @pnntsd of length @acllen for
 * @path.  @aclflag selects what is being changed (owner/group vs. DACL)
 * and therefore which access right the transient open must request.
 * Returns 0 or a negative errno.
 */
static int
set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
		struct inode *inode, const char *path, int aclflag)
{
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	unsigned int xid;
	int rc, access_flags = 0;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	__le16 *utf16_path;

	cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	/* changing owner/group needs WRITE_OWNER; changing the DACL, WRITE_DAC */
	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
		access_flags = WRITE_OWNER;
	else
		access_flags = WRITE_DAC;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path) {
		rc = -ENOMEM;
		free_xid(xid);
		return rc;
	}

	oparms.tcon = tcon;
	oparms.desired_access = access_flags;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.disposition = FILE_OPEN;
	oparms.path = path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/* open a transient handle, set the ACL through it, then close it */
	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
	kfree(utf16_path);
	if (!rc) {
		rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
			    fid.volatile_fid, pnntsd, acllen, aclflag);
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	}

	cifs_put_tlink(tlink);
	free_xid(xid);
	return rc;
}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003101
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003102/* Retrieve an ACL from the server */
3103static struct cifs_ntsd *
3104get_smb2_acl(struct cifs_sb_info *cifs_sb,
3105 struct inode *inode, const char *path,
3106 u32 *pacllen)
3107{
3108 struct cifs_ntsd *pntsd = NULL;
3109 struct cifsFileInfo *open_file = NULL;
3110
3111 if (inode)
3112 open_file = find_readable_file(CIFS_I(inode), true);
3113 if (!open_file)
3114 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
3115
3116 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
3117 cifsFileInfo_put(open_file);
3118 return pntsd;
3119}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003120
/*
 * FALLOC_FL_ZERO_RANGE implementation: ask the server to zero
 * [offset, offset + len) via FSCTL_SET_ZERO_DATA and, unless
 * @keep_size, extend EOF to cover the zeroed range.  Emits smb3 trace
 * events on entry and on success/failure.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);


	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* BeyondFinalZero is the first byte past the zeroed range */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
3184
/*
 * FALLOC_FL_PUNCH_HOLE implementation: mark the file sparse (holes only
 * deallocate storage on sparse files) and then zero/deallocate the
 * range with FSCTL_SET_ZERO_DATA.  File size is never changed here
 * (PUNCH_HOLE requires KEEP_SIZE).
 */
static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len)
{
	struct inode *inode;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__u8 set_sparse = 1;

	xid = get_xid();

	inode = d_inode(cfile->dentry);

	/* Need to make file sparse, if not already, before freeing range. */
	/* Consider adding equivalent for compressed since it could also work */
	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* BeyondFinalZero is the first byte past the hole */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
			true /* is_fctl */, (char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			CIFSMaxBufSize, NULL, NULL);
	free_xid(xid);
	return rc;
}
3220
/*
 * Plain fallocate (mode 0 or FALLOC_FL_KEEP_SIZE): ensure the range is
 * allocated on the server.  SMB3 files are non-sparse by default, so
 * this is frequently a no-op; extending a file sets EOF, and a sparse
 * file may be made non-sparse when (almost) the whole file is covered.
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			       loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Extending the file
	 */
	if ((keep_size == false) && i_size_read(inode) < off + len) {
		/* un-sparse first so the extension is really allocated */
		if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
			smb2_set_sparse(xid, tcon, cfile, inode, false);

		eof = cpu_to_le64(off + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
		if (rc == 0) {
			/* keep local inode size/page cache in sync with server EOF */
			cifsi->server_eof = off + len;
			cifs_setsize(inode, off + len);
			cifs_truncate_page(inode->i_mapping, inode->i_size);
			truncate_setsize(inode, off + len);
		}
		goto out;
	}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and since we are not
	 * extending then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		rc = 0;
		goto out;
	}

	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	smb2_set_sparse(xid, tcon, cfile, inode, false);
	rc = 0;

out:
	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3305
/*
 * SEEK_HOLE/SEEK_DATA support: map the request onto the server's
 * FSCTL_QUERY_ALLOCATED_RANGES.  Other whence values are handled by
 * generic_file_llseek().  Returns the new offset or a negative errno
 * (-ENXIO past EOF / no data, as lseek(2) specifies).
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* non-sparse files are fully allocated: the only hole is at EOF */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	/* ask for only the first allocated range at/after offset */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG)
		rc = 0;	/* truncated response is fine, we use only range 0 */
	if (rc)
		goto lseek_exit;

	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: if offset is inside the returned data range, the
	 * next hole starts right after that range; otherwise offset
	 * itself is already in a hole. */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
3389
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003390static int smb3_fiemap(struct cifs_tcon *tcon,
3391 struct cifsFileInfo *cfile,
3392 struct fiemap_extent_info *fei, u64 start, u64 len)
3393{
3394 unsigned int xid;
3395 struct file_allocated_range_buffer in_data, *out_data;
3396 u32 out_data_len;
3397 int i, num, rc, flags, last_blob;
3398 u64 next;
3399
3400 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
3401 return -EBADR;
3402
3403 xid = get_xid();
3404 again:
3405 in_data.file_offset = cpu_to_le64(start);
3406 in_data.length = cpu_to_le64(len);
3407
3408 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3409 cfile->fid.volatile_fid,
3410 FSCTL_QUERY_ALLOCATED_RANGES, true,
3411 (char *)&in_data, sizeof(in_data),
3412 1024 * sizeof(struct file_allocated_range_buffer),
3413 (char **)&out_data, &out_data_len);
3414 if (rc == -E2BIG) {
3415 last_blob = 0;
3416 rc = 0;
3417 } else
3418 last_blob = 1;
3419 if (rc)
3420 goto out;
3421
3422 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3423 rc = -EINVAL;
3424 goto out;
3425 }
3426 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3427 rc = -EINVAL;
3428 goto out;
3429 }
3430
3431 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3432 for (i = 0; i < num; i++) {
3433 flags = 0;
3434 if (i == num - 1 && last_blob)
3435 flags |= FIEMAP_EXTENT_LAST;
3436
3437 rc = fiemap_fill_next_extent(fei,
3438 le64_to_cpu(out_data[i].file_offset),
3439 le64_to_cpu(out_data[i].file_offset),
3440 le64_to_cpu(out_data[i].length),
3441 flags);
3442 if (rc < 0)
3443 goto out;
3444 if (rc == 1) {
3445 rc = 0;
3446 goto out;
3447 }
3448 }
3449
3450 if (!last_blob) {
3451 next = le64_to_cpu(out_data[num - 1].file_offset) +
3452 le64_to_cpu(out_data[num - 1].length);
3453 len = len - (next - start);
3454 start = next;
3455 goto again;
3456 }
3457
3458 out:
3459 free_xid(xid);
3460 kfree(out_data);
3461 return rc;
3462}
Steve French9ccf3212014-10-18 17:01:15 -05003463
Steve French31742c52014-08-17 08:38:47 -05003464static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3465 loff_t off, loff_t len)
3466{
3467 /* KEEP_SIZE already checked for by do_fallocate */
3468 if (mode & FALLOC_FL_PUNCH_HOLE)
3469 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003470 else if (mode & FALLOC_FL_ZERO_RANGE) {
3471 if (mode & FALLOC_FL_KEEP_SIZE)
3472 return smb3_zero_range(file, tcon, off, len, true);
3473 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003474 } else if (mode == FALLOC_FL_KEEP_SIZE)
3475 return smb3_simple_falloc(file, tcon, off, len, true);
3476 else if (mode == 0)
3477 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003478
3479 return -EOPNOTSUPP;
3480}
3481
/*
 * Oplock break handler for SMB 2.0: there are no lease epochs, so just
 * apply the new level directly; @epoch and @purge_cache are unused.
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
			struct cifsInodeInfo *cinode, __u32 oplock,
			unsigned int epoch, bool *purge_cache)
{
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
3489
/* forward declaration - needed by smb3_downgrade_oplock() below */
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache);

/*
 * Lease break handler for SMB 3: apply the new lease state only when
 * the server's epoch is newer than ours, then decide whether cached
 * data must be purged - either because read caching was lost, or
 * because the epoch jumped by more than one (a break was missed) even
 * though the state looks unchanged.
 */
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
		       struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache)
{
	unsigned int old_state = cinode->oplock;
	unsigned int old_epoch = cinode->epoch;
	unsigned int new_state;

	if (epoch > old_epoch) {
		smb21_set_oplock_level(cinode, oplock, 0, NULL);
		cinode->epoch = epoch;
	}

	new_state = cinode->oplock;
	*purge_cache = false;

	/* read caching lost, or state "unchanged" across >1 epoch */
	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
	    (new_state & CIFS_CACHE_READ_FLG) == 0)
		*purge_cache = true;
	else if (old_state == new_state && (epoch - old_epoch > 1))
		*purge_cache = true;
}
3517
3518static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003519smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3520 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003521{
3522 oplock &= 0xFF;
3523 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3524 return;
3525 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003526 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003527 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3528 &cinode->vfs_inode);
3529 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003530 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003531 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3532 &cinode->vfs_inode);
3533 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3534 cinode->oplock = CIFS_CACHE_READ_FLG;
3535 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3536 &cinode->vfs_inode);
3537 } else
3538 cinode->oplock = 0;
3539}
3540
3541static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003542smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3543 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003544{
3545 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003546 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003547
3548 oplock &= 0xFF;
3549 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3550 return;
3551
Pavel Shilovskya016e272019-09-26 12:31:20 -07003552 /* Check if the server granted an oplock rather than a lease */
3553 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3554 return smb2_set_oplock_level(cinode, oplock, epoch,
3555 purge_cache);
3556
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003557 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003558 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003559 strcat(message, "R");
3560 }
3561 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003562 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003563 strcat(message, "H");
3564 }
3565 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003566 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003567 strcat(message, "W");
3568 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003569 if (!new_oplock)
3570 strncpy(message, "None", sizeof(message));
3571
3572 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003573 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3574 &cinode->vfs_inode);
3575}
3576
/*
 * SMB3 variant: apply the lease state, then compare the old and new
 * cache states against the epoch delta to detect missed lease breaks.
 * An epoch jump larger than the downgrade itself accounts for means
 * intervening changes happened on the server and cached pages may be
 * stale, so request a purge.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	/* purge_cache may be NULL when the caller does not care */
	if (purge_cache) {
		*purge_cache = false;
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
3611
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003612static bool
3613smb2_is_read_op(__u32 oplock)
3614{
3615 return oplock == SMB2_OPLOCK_LEVEL_II;
3616}
3617
3618static bool
3619smb21_is_read_op(__u32 oplock)
3620{
3621 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3622 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3623}
3624
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003625static __le32
3626map_oplock_to_lease(u8 oplock)
3627{
3628 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3629 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3630 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3631 return SMB2_LEASE_READ_CACHING;
3632 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3633 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3634 SMB2_LEASE_WRITE_CACHING;
3635 return 0;
3636}
3637
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003638static char *
3639smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3640{
3641 struct create_lease *buf;
3642
3643 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3644 if (!buf)
3645 return NULL;
3646
Stefano Brivio729c0c92018-07-05 15:10:02 +02003647 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003648 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003649
3650 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3651 (struct create_lease, lcontext));
3652 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3653 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3654 (struct create_lease, Name));
3655 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003656 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003657 buf->Name[0] = 'R';
3658 buf->Name[1] = 'q';
3659 buf->Name[2] = 'L';
3660 buf->Name[3] = 's';
3661 return (char *)buf;
3662}
3663
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003664static char *
3665smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3666{
3667 struct create_lease_v2 *buf;
3668
3669 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3670 if (!buf)
3671 return NULL;
3672
Stefano Brivio729c0c92018-07-05 15:10:02 +02003673 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003674 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3675
3676 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3677 (struct create_lease_v2, lcontext));
3678 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3679 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3680 (struct create_lease_v2, Name));
3681 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003682 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003683 buf->Name[0] = 'R';
3684 buf->Name[1] = 'q';
3685 buf->Name[2] = 'L';
3686 buf->Name[3] = 's';
3687 return (char *)buf;
3688}
3689
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003690static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003691smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003692{
3693 struct create_lease *lc = (struct create_lease *)buf;
3694
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003695 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003696 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3697 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3698 return le32_to_cpu(lc->lcontext.LeaseState);
3699}
3700
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003701static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003702smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003703{
3704 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3705
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003706 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003707 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3708 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003709 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003710 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003711 return le32_to_cpu(lc->lcontext.LeaseState);
3712}
3713
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003714static unsigned int
3715smb2_wp_retry_size(struct inode *inode)
3716{
3717 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3718 SMB2_MAX_BUFFER_SIZE);
3719}
3720
Pavel Shilovsky52755802014-08-18 20:49:57 +04003721static bool
3722smb2_dir_needs_close(struct cifsFileInfo *cfile)
3723{
3724 return !cfile->invalidHandle;
3725}
3726
/*
 * Build the SMB2 TRANSFORM_HEADER that prefixes an encrypted message.
 * @orig_len is the size of the plaintext message; the session id is
 * copied from the original request's sync header, and the nonce length
 * depends on the negotiated cipher (GCM vs. CCM).
 */
static void
fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
		   struct smb_rqst *old_rq, __le16 cipher_type)
{
	struct smb2_sync_hdr *shdr =
		(struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;

	memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
	tr_hdr->Flags = cpu_to_le16(0x01); /* 0x01 = encrypted (MS-SMB2) */
	if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
		get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
	else
		get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
	memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
}
3744
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	/* holds a struct page * (passed through void * for both branches) */
	void *addr;
	/*
	 * VMAP_STACK (at least) puts stack into the vmalloc address space
	 */
	if (is_vmalloc_addr(buf))
		addr = vmalloc_to_page(buf);
	else
		addr = virt_to_page(buf);
	/* offset_in_page() gives the same low bits for either mapping */
	sg_set_page(sg, addr, buflen, offset_in_page(buf));
}
3761
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0] is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds a kmalloc'd scatterlist covering every iov and page of all
 * @num_rqst requests plus a final entry for the @sign buffer, skipping
 * the first 20 bytes of the transform header (which are associated
 * data, not part of the encrypted blob).  Returns NULL on allocation
 * failure; the caller frees the returned array.
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and per page, plus one for the signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* final entry receives/provides the signature */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
3809
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003810static int
3811smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3812{
3813 struct cifs_ses *ses;
3814 u8 *ses_enc_key;
3815
3816 spin_lock(&cifs_tcp_ses_lock);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02003817 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
3818 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3819 if (ses->Suid == ses_id) {
3820 ses_enc_key = enc ? ses->smb3encryptionkey :
3821 ses->smb3decryptionkey;
3822 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3823 spin_unlock(&cifs_tcp_ses_lock);
3824 return 0;
3825 }
3826 }
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003827 }
3828 spin_unlock(&cifs_tcp_ses_lock);
3829
3830 return 1;
3831}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003832/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003833 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3834 * iov[0] - transform header (associate data),
3835 * iov[1-N] - SMB2 header and pages - data to encrypt.
3836 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003837 * untouched.
3838 */
3839static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003840crypt_message(struct TCP_Server_Info *server, int num_rqst,
3841 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003842{
3843 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003844 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003845 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003846 int rc = 0;
3847 struct scatterlist *sg;
3848 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003849 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003850 struct aead_request *req;
3851 char *iv;
3852 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003853 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003854 struct crypto_aead *tfm;
3855 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3856
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003857 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3858 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003859 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003860 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003861 return 0;
3862 }
3863
3864 rc = smb3_crypto_aead_allocate(server);
3865 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003866 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003867 return rc;
3868 }
3869
3870 tfm = enc ? server->secmech.ccmaesencrypt :
3871 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003872 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003873 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003874 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003875 return rc;
3876 }
3877
3878 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3879 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003880 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003881 return rc;
3882 }
3883
3884 req = aead_request_alloc(tfm, GFP_KERNEL);
3885 if (!req) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003886 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003887 return -ENOMEM;
3888 }
3889
3890 if (!enc) {
3891 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3892 crypt_len += SMB2_SIGNATURE_SIZE;
3893 }
3894
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003895 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003896 if (!sg) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003897 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003898 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003899 goto free_req;
3900 }
3901
3902 iv_len = crypto_aead_ivsize(tfm);
3903 iv = kzalloc(iv_len, GFP_KERNEL);
3904 if (!iv) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003905 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003906 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003907 goto free_sg;
3908 }
Steve French2b2f7542019-06-07 15:16:10 -05003909
3910 if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3911 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3912 else {
3913 iv[0] = 3;
3914 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
3915 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003916
3917 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3918 aead_request_set_ad(req, assoc_data_len);
3919
3920 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003921 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003922
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003923 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3924 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003925
3926 if (!rc && enc)
3927 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3928
3929 kfree(iv);
3930free_sg:
3931 kfree(sg);
3932free_req:
3933 kfree(req);
3934 return rc;
3935}
3936
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003937void
3938smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003939{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003940 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003941
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003942 for (i = 0; i < num_rqst; i++) {
3943 if (rqst[i].rq_pages) {
3944 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3945 put_page(rqst[i].rq_pages[j]);
3946 kfree(rqst[i].rq_pages);
3947 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003948 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003949}
3950
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003951/*
3952 * This function will initialize new_rq and encrypt the content.
3953 * The first entry, new_rq[0], only contains a single iov which contains
3954 * a smb2_transform_hdr and is pre-allocated by the caller.
3955 * This function then populates new_rq[1+] with the content from olq_rq[0+].
3956 *
3957 * The end result is an array of smb_rqst structures where the first structure
3958 * only contains a single iov for the transform header which we then can pass
3959 * to crypt_message().
3960 *
3961 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3962 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3963 */
3964static int
3965smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3966 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003967{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003968 struct page **pages;
3969 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3970 unsigned int npages;
3971 unsigned int orig_len = 0;
3972 int i, j;
3973 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003974
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003975 for (i = 1; i < num_rqst; i++) {
3976 npages = old_rq[i - 1].rq_npages;
3977 pages = kmalloc_array(npages, sizeof(struct page *),
3978 GFP_KERNEL);
3979 if (!pages)
3980 goto err_free;
3981
3982 new_rq[i].rq_pages = pages;
3983 new_rq[i].rq_npages = npages;
3984 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3985 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3986 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3987 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3988 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3989
3990 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3991
3992 for (j = 0; j < npages; j++) {
3993 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3994 if (!pages[j])
3995 goto err_free;
3996 }
3997
3998 /* copy pages form the old */
3999 for (j = 0; j < npages; j++) {
4000 char *dst, *src;
4001 unsigned int offset, len;
4002
4003 rqst_page_get_length(&new_rq[i], j, &len, &offset);
4004
4005 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
4006 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
4007
4008 memcpy(dst, src, len);
4009 kunmap(new_rq[i].rq_pages[j]);
4010 kunmap(old_rq[i - 1].rq_pages[j]);
4011 }
4012 }
4013
4014 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05004015 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004016
4017 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02004018 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004019 if (rc)
4020 goto err_free;
4021
4022 return rc;
4023
4024err_free:
4025 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
4026 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004027}
4028
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004029static int
4030smb3_is_transform_hdr(void *buf)
4031{
4032 struct smb2_transform_hdr *trhdr = buf;
4033
4034 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4035}
4036
4037static int
4038decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4039 unsigned int buf_data_size, struct page **pages,
4040 unsigned int npages, unsigned int page_data_size)
4041{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004042 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004043 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004044 int rc;
4045
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004046 iov[0].iov_base = buf;
4047 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4048 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4049 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004050
4051 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004052 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004053 rqst.rq_pages = pages;
4054 rqst.rq_npages = npages;
4055 rqst.rq_pagesz = PAGE_SIZE;
4056 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
4057
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004058 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02004059 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004060
4061 if (rc)
4062 return rc;
4063
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004064 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004065
4066 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004067
4068 return rc;
4069}
4070
4071static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004072read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
4073 unsigned int npages, unsigned int len)
4074{
4075 int i;
4076 int length;
4077
4078 for (i = 0; i < npages; i++) {
4079 struct page *page = pages[i];
4080 size_t n;
4081
4082 n = len;
4083 if (len >= PAGE_SIZE) {
4084 /* enough data to fill the page */
4085 n = PAGE_SIZE;
4086 len -= n;
4087 } else {
4088 zero_user(page, len, PAGE_SIZE - len);
4089 len = 0;
4090 }
Long Li1dbe3462018-05-30 12:47:55 -07004091 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004092 if (length < 0)
4093 return length;
4094 server->total_read += length;
4095 }
4096
4097 return 0;
4098}
4099
4100static int
4101init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
4102 unsigned int cur_off, struct bio_vec **page_vec)
4103{
4104 struct bio_vec *bvec;
4105 int i;
4106
4107 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
4108 if (!bvec)
4109 return -ENOMEM;
4110
4111 for (i = 0; i < npages; i++) {
4112 bvec[i].bv_page = pages[i];
4113 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
4114 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
4115 data_size -= bvec[i].bv_len;
4116 }
4117
4118 if (data_size != 0) {
4119 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4120 kfree(bvec);
4121 return -EIO;
4122 }
4123
4124 *page_vec = bvec;
4125 return 0;
4126}
4127
/*
 * Parse a (possibly already-decrypted) SMB2 READ response and copy its
 * payload into the destination of the cifs_readdata hanging off @mid.
 *
 * @buf:            response buffer (header plus maybe some payload)
 * @buf_len:        number of valid bytes in @buf
 * @pages/@npages:  pages holding payload that did not fit in @buf
 * @page_data_size: number of payload bytes in @pages
 *
 * Protocol-level failures are recorded in rdata->result and 0 is returned
 * (the mid is dequeued); transport-level failures return a negative value;
 * otherwise the number of bytes copied is returned.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server);
		wake_up(&server->response_q);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* bytes of padding between the response header and the payload */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* hand the iterator to the caller-provided copy routine */
	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	dequeue_mid(mid, false);
	return length;
}
4261
Steve French35cf94a2019-09-07 01:09:49 -05004262struct smb2_decrypt_work {
4263 struct work_struct decrypt;
4264 struct TCP_Server_Info *server;
4265 struct page **ppages;
4266 char *buf;
4267 unsigned int npages;
4268 unsigned int len;
4269};
4270
4271
/*
 * Worker-thread counterpart of receive_encrypted_read(): decrypt the
 * buffered response, find and complete the matching mid, then release the
 * pages, the buffer and the work item itself.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	/* record last server response time for reconnect heuristics */
	dw->server->lstrp = jiffies;
	mid = smb2_find_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len);
		/* complete the request here since the demultiplex thread
		 * handed this response off to us */
		mid->callback(mid);
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4307
4308
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004309static int
Steve French35cf94a2019-09-07 01:09:49 -05004310receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
4311 int *num_mids)
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004312{
4313 char *buf = server->smallbuf;
4314 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4315 unsigned int npages;
4316 struct page **pages;
4317 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004318 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004319 int rc;
4320 int i = 0;
Steve French35cf94a2019-09-07 01:09:49 -05004321 struct smb2_decrypt_work *dw;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004322
Steve French35cf94a2019-09-07 01:09:49 -05004323 *num_mids = 1;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004324 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004325 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4326
4327 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4328 if (rc < 0)
4329 return rc;
4330 server->total_read += rc;
4331
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004332 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11004333 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004334 npages = DIV_ROUND_UP(len, PAGE_SIZE);
4335
4336 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
4337 if (!pages) {
4338 rc = -ENOMEM;
4339 goto discard_data;
4340 }
4341
4342 for (; i < npages; i++) {
4343 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4344 if (!pages[i]) {
4345 rc = -ENOMEM;
4346 goto discard_data;
4347 }
4348 }
4349
4350 /* read read data into pages */
4351 rc = read_data_into_pages(server, pages, npages, len);
4352 if (rc)
4353 goto free_pages;
4354
Pavel Shilovsky350be252017-04-10 10:31:33 -07004355 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004356 if (rc)
4357 goto free_pages;
4358
Steve French35cf94a2019-09-07 01:09:49 -05004359 /*
4360 * For large reads, offload to different thread for better performance,
4361 * use more cores decrypting which can be expensive
4362 */
4363
Steve French10328c42019-09-09 13:30:15 -05004364 if ((server->min_offload) && (server->in_flight > 1) &&
Steve French563317e2019-09-08 23:22:02 -05004365 (server->pdu_size >= server->min_offload)) {
Steve French35cf94a2019-09-07 01:09:49 -05004366 dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
4367 if (dw == NULL)
4368 goto non_offloaded_decrypt;
4369
4370 dw->buf = server->smallbuf;
4371 server->smallbuf = (char *)cifs_small_buf_get();
4372
4373 INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
4374
4375 dw->npages = npages;
4376 dw->server = server;
4377 dw->ppages = pages;
4378 dw->len = len;
Steve Frencha08d8972019-10-26 16:00:44 -05004379 queue_work(decrypt_wq, &dw->decrypt);
Steve French35cf94a2019-09-07 01:09:49 -05004380 *num_mids = 0; /* worker thread takes care of finding mid */
4381 return -1;
4382 }
4383
4384non_offloaded_decrypt:
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004385 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004386 pages, npages, len);
4387 if (rc)
4388 goto free_pages;
4389
4390 *mid = smb2_find_mid(server, buf);
4391 if (*mid == NULL)
4392 cifs_dbg(FYI, "mid not found\n");
4393 else {
4394 cifs_dbg(FYI, "mid found\n");
4395 (*mid)->decrypted = true;
4396 rc = handle_read_data(server, *mid, buf,
4397 server->vals->read_rsp_size,
4398 pages, npages, len);
4399 }
4400
4401free_pages:
4402 for (i = i - 1; i >= 0; i--)
4403 put_page(pages[i]);
4404 kfree(pages);
4405 return rc;
4406discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07004407 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004408 goto free_pages;
4409}
4410
4411static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004412receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004413 struct mid_q_entry **mids, char **bufs,
4414 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004415{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004416 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004417 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004418 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004419 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004420 unsigned int buf_size;
4421 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004422 int next_is_large;
4423 char *next_buffer = NULL;
4424
4425 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004426
4427 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004428 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004429 server->large_buf = true;
4430 memcpy(server->bigbuf, buf, server->total_read);
4431 buf = server->bigbuf;
4432 }
4433
4434 /* now read the rest */
4435 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004436 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004437 if (length < 0)
4438 return length;
4439 server->total_read += length;
4440
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004441 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004442 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
4443 if (length)
4444 return length;
4445
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004446 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004447one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004448 shdr = (struct smb2_sync_hdr *)buf;
4449 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004450 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004451 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004452 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004453 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004454 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004455 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004456 pdu_length - le32_to_cpu(shdr->NextCommand));
4457 }
4458
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004459 mid_entry = smb2_find_mid(server, buf);
4460 if (mid_entry == NULL)
4461 cifs_dbg(FYI, "mid not found\n");
4462 else {
4463 cifs_dbg(FYI, "mid found\n");
4464 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004465 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004466 }
4467
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004468 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004469 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004470 return -1;
4471 }
4472 bufs[*num_mids] = buf;
4473 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004474
4475 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004476 ret = mid_entry->handle(server, mid_entry);
4477 else
4478 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004479
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004480 if (ret == 0 && shdr->NextCommand) {
4481 pdu_length -= le32_to_cpu(shdr->NextCommand);
4482 server->large_buf = next_is_large;
4483 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004484 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004485 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004486 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004487 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004488 } else if (ret != 0) {
4489 /*
4490 * ret != 0 here means that we didn't get to handle_mid() thus
4491 * server->smallbuf and server->bigbuf are still valid. We need
4492 * to free next_buffer because it is not going to be used
4493 * anywhere.
4494 */
4495 if (next_is_large)
4496 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4497 else
4498 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004499 }
4500
4501 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004502}
4503
4504static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004505smb3_receive_transform(struct TCP_Server_Info *server,
4506 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004507{
4508 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004509 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004510 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4511 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4512
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004513 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004514 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004515 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004516 pdu_length);
4517 cifs_reconnect(server);
4518 wake_up(&server->response_q);
4519 return -ECONNABORTED;
4520 }
4521
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004522 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004523 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004524 cifs_reconnect(server);
4525 wake_up(&server->response_q);
4526 return -ECONNABORTED;
4527 }
4528
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004529 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004530 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004531 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004532 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004533
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004534 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004535}
4536
4537int
4538smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4539{
4540 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4541
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004542 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004543 NULL, 0, 0);
4544}
4545
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004546static int
4547smb2_next_header(char *buf)
4548{
4549 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4550 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4551
4552 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4553 return sizeof(struct smb2_transform_hdr) +
4554 le32_to_cpu(t_hdr->OriginalMessageSize);
4555
4556 return le32_to_cpu(hdr->NextCommand);
4557}
4558
/*
 * Create a special file (block/char device node) on an SMB2+ mount using
 * the SFU (Services For Unix) emulation scheme: create a regular file and
 * write a "IntxCHR"/"IntxBLK" tagged win_dev blob with major/minor into it.
 *
 * Returns 0 on success, -EPERM when SFU emulation is not enabled or the
 * mode is not a supported device type, or a negative errno from the
 * open/write path.
 */
static int
smb2_make_node(unsigned int xid, struct inode *inode,
	       struct dentry *dentry, struct cifs_tcon *tcon,
	       char *full_path, umode_t mode, dev_t dev)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	int rc = -EPERM;
	FILE_ALL_INFO *buf = NULL;
	struct cifs_io_parms io_parms;
	__u32 oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	unsigned int bytes_written;
	struct win_dev *pdev;
	struct kvec iov[2];

	/*
	 * Check if mounted with mount parm 'sfu' mount parm.
	 * SFU emulation should work with all servers, but only
	 * supports block and char device (no socket & fifo),
	 * and was used by default in earlier versions of Windows
	 */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
		goto out;

	/*
	 * TODO: Add ability to create instead via reparse point. Windows (e.g.
	 * their current NFS server) uses this approach to expose special files
	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
	 */

	/* Only block and character devices are supported by SFU emulation. */
	if (!S_ISCHR(mode) && !S_ISBLK(mode))
		goto out;

	cifs_dbg(FYI, "sfu compat create special file\n");

	/* Scratch buffer doubles as open-reply info and the win_dev payload. */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
						    CREATE_OPTION_SPECIAL);
	oparms.disposition = FILE_CREATE;	/* fail if it already exists */
	oparms.path = full_path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;
	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
	if (rc)
		goto out;

	/*
	 * BB Do not bother to decode buf since no local inode yet to put
	 * timestamps in, but we can reuse it safely.
	 */

	pdev = (struct win_dev *)buf;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = sizeof(struct win_dev);
	/*
	 * NOTE(review): data goes in iov[1]; iov[0] appears reserved for the
	 * request header filled in by sync_write — confirm against SMB2_write.
	 */
	iov[1].iov_base = buf;
	iov[1].iov_len = sizeof(struct win_dev);
	if (S_ISCHR(mode)) {
		/* 8-byte tag includes the terminating NUL */
		memcpy(pdev->type, "IntxCHR", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	} else if (S_ISBLK(mode)) {
		memcpy(pdev->type, "IntxBLK", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	}
	/* Close unconditionally; drop the dentry so lookup re-reads metadata. */
	tcon->ses->server->ops->close(xid, tcon, &fid);
	d_drop(dentry);

	/* FIXME: add code here to set EAs */
out:
	kfree(buf);
	return rc;
}
4652
4653
/*
 * Dialect dispatch table for SMB 2.02. Notable vs later dialects: no
 * multicredit (generic cifs_wait_mtu_credits, no adjust_credits), plain
 * SMB2 lease buffers, and no encryption/snapshot/notify support.
 */
struct smb_version_operations smb20_operations = {
	.compare_fids = smb2_compare_fids,
	/* transport / request setup */
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	/* credit accounting */
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = cifs_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	/* session / tree connect */
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	/* path and file metadata operations */
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	/* open/close and I/O */
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	/* byte-range locking */
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	/* leases and signing */
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb2_is_read_op,
	.set_oplock_level = smb2_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
4750
/*
 * Dialect dispatch table for SMB 2.1. Differs from smb20_operations by
 * supporting multicredit (smb2_wait_mtu_credits/adjust_credits), lease-aware
 * is_read_op/set_oplock_level (smb21_*), snapshots and change notify.
 */
struct smb_version_operations smb21_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	/* credit accounting (large MTU / multicredit capable) */
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	/* SMB2.1 adds leases; these variants understand lease state */
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb21_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
4850
/*
 * Dialect dispatch table for SMB 3.0/3.0.2. Adds to SMB2.1: SMB3 signing
 * key derivation (generate_smb30signingkey + AES-CMAC signature), v2 lease
 * buffers, encryption (init_transform_rq/is_transform_hdr/receive_transform),
 * duplicate extents, fallocate, and secure dialect validation.
 */
struct smb_version_operations smb30_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	/* SMB3 negotiates larger I/O sizes (multichannel/RDMA aware) */
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	/* SMB3 derives per-session signing keys and uses AES-CMAC */
	.generate_signingkey = generate_smb30signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	/* per-message encryption (transform header) support */
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
4960
/*
 * Dialect dispatch table for SMB 3.1.1. Relative to smb30_operations:
 * preauth-integrity-based signing key (generate_smb311signingkey), POSIX
 * extension mkdir and queryfs variants, and no validate_negotiate (3.1.1
 * replaces it with preauth integrity hashing).
 */
struct smb_version_operations smb311_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	/* 3.1.1 POSIX extensions: create directory via posix create context */
	.posix_mkdir = smb311_posix_mkdir,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb311_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	/* 3.1.1 signing key is bound to the preauth integrity hash */
	.generate_signingkey = generate_smb311signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
Steve Frenchaab18932015-06-23 23:37:11 -05005071
/* Protocol constants for the SMB 2.02 dialect. */
struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: read response has a 1-byte variable data placeholder */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
5092
/* Protocol constants for the SMB 2.1 dialect. */
struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: read response has a 1-byte variable data placeholder */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
5113
/*
 * Protocol constants used for the "any SMB3 dialect" multi-dialect
 * negotiate.  A dialect array is sent on the wire, so .protocol_id is
 * only a placeholder; the full SMB3 capability set is requested and
 * the v2 lease create context is used.
 */
struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr), /* synchronous SMB2 header */
	.header_preamble_size = 0, /* no length preamble precedes the header */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: exclude the one-byte variable-data placeholder in the rsp struct */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* no POSIX/Unix extensions on SMB2+ */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	/* a server that requires signing has it enabled too */
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
5134
/*
 * Protocol constants used for the default multi-dialect negotiate.
 * A dialect array is sent on the wire, so .protocol_id is only a
 * placeholder; the full SMB3 capability set is requested and the
 * v2 lease create context is used.
 */
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr), /* synchronous SMB2 header */
	.header_preamble_size = 0, /* no length preamble precedes the header */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: exclude the one-byte variable-data placeholder in the rsp struct */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* no POSIX/Unix extensions on SMB2+ */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	/* a server that requires signing has it enabled too */
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
5155
/*
 * Protocol constants for the SMB 3.0 dialect.  Requests the full SMB3
 * capability set and uses the v2 lease create context.
 */
struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr), /* synchronous SMB2 header */
	.header_preamble_size = 0, /* no length preamble precedes the header */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: exclude the one-byte variable-data placeholder in the rsp struct */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* no POSIX/Unix extensions on SMB2+ */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	/* a server that requires signing has it enabled too */
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French20b6d8b2013-06-12 22:48:41 -05005176
/*
 * Protocol constants for the SMB 3.0.2 dialect.  Requests the full SMB3
 * capability set and uses the v2 lease create context.
 */
struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr), /* synchronous SMB2 header */
	.header_preamble_size = 0, /* no length preamble precedes the header */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: exclude the one-byte variable-data placeholder in the rsp struct */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* no POSIX/Unix extensions on SMB2+ */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	/* a server that requires signing has it enabled too */
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French5f7fbf72014-12-17 22:52:58 -06005197
/*
 * Protocol constants for the SMB 3.1.1 dialect.  Requests the full SMB3
 * capability set and uses the v2 lease create context.
 */
struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr), /* synchronous SMB2 header */
	.header_preamble_size = 0, /* no length preamble precedes the header */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: exclude the one-byte variable-data placeholder in the rsp struct */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* no POSIX/Unix extensions on SMB2+ */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	/* a server that requires signing has it enabled too */
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};