blob: aef33630e31512d33385fb404216ae820ab3aa61 [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Aurelien Aptel35adffe2019-09-20 06:29:39 +020013#include <linux/sort.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070014#include <crypto/aead.h>
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +100015#include "cifsfs.h"
Steve French1080ef72011-02-24 18:07:19 +000016#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040017#include "smb2pdu.h"
18#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040019#include "cifsproto.h"
20#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040021#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070022#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070023#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050024#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070025#include "smbdirect.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040026
Pavel Shilovskyef68e832019-01-18 17:25:36 -080027/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040028static int
29change_conf(struct TCP_Server_Info *server)
30{
31 server->credits += server->echo_credits + server->oplock_credits;
32 server->oplock_credits = server->echo_credits = 0;
33 switch (server->credits) {
34 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080035 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040036 case 1:
37 server->echoes = false;
38 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040039 break;
40 case 2:
41 server->echoes = true;
42 server->oplocks = false;
43 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040044 break;
45 default:
46 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050047 if (enable_oplocks) {
48 server->oplocks = true;
49 server->oplock_credits = 1;
50 } else
51 server->oplocks = false;
52
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040053 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040054 }
55 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080056 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040057}
58
/*
 * Return credits granted in a response to the connection's pool and
 * rebalance the echo/oplock reservations if this was the last in-flight
 * request. Credits from a previous connection instance (pre-reconnect)
 * are discarded rather than added.
 */
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;	/* -1: change_conf() was not run */
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->hostname, *val);
	/* instance 0 means "don't care"; otherwise it must match the live one */
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	/* logging is done outside the spinlock on purpose */
	if (reconnect_detected)
		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			add, instance);

	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
	}
}
126
127static void
128smb2_set_credits(struct TCP_Server_Info *server, const int val)
129{
130 spin_lock(&server->req_lock);
131 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500132 if (val == 1)
133 server->reconnect_instance++;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400134 spin_unlock(&server->req_lock);
Steve French6e4d3bb2018-09-22 11:25:04 -0500135 /* don't log while holding the lock */
136 if (val == 1)
137 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400138}
139
140static int *
141smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
142{
143 switch (optype) {
144 case CIFS_ECHO_OP:
145 return &server->echo_credits;
146 case CIFS_OBREAK_OP:
147 return &server->oplock_credits;
148 default:
149 return &server->credits;
150 }
151}
152
/* Credits the server granted in the response for this mid. */
static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	return mid->credits_received;
}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400158
/*
 * Reserve credits for a large (MTU) read/write of up to @size bytes,
 * sleeping (killably) until at least one credit is available. On success
 * *num is the payload size to use and *credits records what was taken
 * (and from which connection instance) so it can be returned later.
 */
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			/* must drop the lock before sleeping */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				/*
				 * Too few credits to carve any out: fall back
				 * to a single-credit sized request and take
				 * nothing from the pool (value/instance 0).
				 */
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			if (server->in_flight > server->max_in_flight)
				server->max_in_flight = server->in_flight;
			break;
		}
	}
	spin_unlock(&server->req_lock);
	return rc;
}
210
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800211static int
212smb2_adjust_credits(struct TCP_Server_Info *server,
213 struct cifs_credits *credits,
214 const unsigned int payload_size)
215{
216 int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
217
218 if (!credits->value || credits->value == new_val)
219 return 0;
220
221 if (credits->value < new_val) {
222 WARN_ONCE(1, "request has less credits (%d) than required (%d)",
223 credits->value, new_val);
224 return -ENOTSUPP;
225 }
226
227 spin_lock(&server->req_lock);
228
229 if (server->reconnect_instance != credits->instance) {
230 spin_unlock(&server->req_lock);
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000231 cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800232 credits->value - new_val);
233 return -EAGAIN;
234 }
235
236 server->credits += credits->value - new_val;
237 spin_unlock(&server->req_lock);
238 wake_up(&server->request_q);
239 credits->value = new_val;
240 return 0;
241}
242
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400243static __u64
244smb2_get_next_mid(struct TCP_Server_Info *server)
245{
246 __u64 mid;
247 /* for SMB2 we need the current value */
248 spin_lock(&GlobalMid_Lock);
249 mid = server->CurrentMid++;
250 spin_unlock(&GlobalMid_Lock);
251 return mid;
252}
Steve French1080ef72011-02-24 18:07:19 +0000253
/*
 * Give back @val message ids that were allocated but never sent
 * (e.g. a compound that failed before transmission). Guarded against
 * underflow in case the counter was reset concurrently.
 */
static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
	spin_lock(&GlobalMid_Lock);
	if (server->CurrentMid >= val)
		server->CurrentMid -= val;
	spin_unlock(&GlobalMid_Lock);
}
262
/*
 * Find the pending mid entry matching a received SMB2 frame, taking a
 * reference on it. Returns NULL if no submitted request matches (or the
 * frame is encrypted, which this path does not handle).
 */
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			/* ref taken under the lock so the mid can't vanish */
			kref_get(&mid->refcount);
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}
288
/* Dump header fields of an SMB2 frame (debug builds only). */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}
302
/* Negotiation is still needed while max_read is unset (zero). */
static bool
smb2_need_neg(struct TCP_Server_Info *server)
{
	return server->max_read == 0;
}
308
309static int
310smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
311{
312 int rc;
Christoph Probsta205d502019-05-08 21:36:25 +0200313
Aurelien Aptelf6a6bf72019-09-20 06:22:14 +0200314 cifs_ses_server(ses)->CurrentMid = 0;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400315 rc = SMB2_negotiate(xid, ses);
316 /* BB we probably don't need to retry with modern servers */
317 if (rc == -EAGAIN)
318 rc = -EHOSTDOWN;
319 return rc;
320}
321
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700322static unsigned int
323smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
324{
325 struct TCP_Server_Info *server = tcon->ses->server;
326 unsigned int wsize;
327
328 /* start with specified wsize, or default */
329 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
330 wsize = min_t(unsigned int, wsize, server->max_write);
Long Li09902f82017-11-22 17:38:39 -0700331#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700332 if (server->rdma) {
333 if (server->sign)
334 wsize = min_t(unsigned int,
335 wsize, server->smbd_conn->max_fragmented_send_size);
336 else
337 wsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700338 wsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700339 }
Long Li09902f82017-11-22 17:38:39 -0700340#endif
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400341 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
342 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700343
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700344 return wsize;
345}
346
347static unsigned int
Steve French3d621232018-09-25 15:33:47 -0500348smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
349{
350 struct TCP_Server_Info *server = tcon->ses->server;
351 unsigned int wsize;
352
353 /* start with specified wsize, or default */
354 wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
355 wsize = min_t(unsigned int, wsize, server->max_write);
356#ifdef CONFIG_CIFS_SMB_DIRECT
357 if (server->rdma) {
358 if (server->sign)
359 wsize = min_t(unsigned int,
360 wsize, server->smbd_conn->max_fragmented_send_size);
361 else
362 wsize = min_t(unsigned int,
363 wsize, server->smbd_conn->max_readwrite_size);
364 }
365#endif
366 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
367 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
368
369 return wsize;
370}
371
372static unsigned int
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700373smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
374{
375 struct TCP_Server_Info *server = tcon->ses->server;
376 unsigned int rsize;
377
378 /* start with specified rsize, or default */
379 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
380 rsize = min_t(unsigned int, rsize, server->max_read);
Long Li09902f82017-11-22 17:38:39 -0700381#ifdef CONFIG_CIFS_SMB_DIRECT
Long Libb4c0412018-04-17 12:17:08 -0700382 if (server->rdma) {
383 if (server->sign)
384 rsize = min_t(unsigned int,
385 rsize, server->smbd_conn->max_fragmented_recv_size);
386 else
387 rsize = min_t(unsigned int,
Long Li09902f82017-11-22 17:38:39 -0700388 rsize, server->smbd_conn->max_readwrite_size);
Long Libb4c0412018-04-17 12:17:08 -0700389 }
Long Li09902f82017-11-22 17:38:39 -0700390#endif
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400391
392 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
393 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700394
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700395 return rsize;
396}
397
Steve French3d621232018-09-25 15:33:47 -0500398static unsigned int
399smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
400{
401 struct TCP_Server_Info *server = tcon->ses->server;
402 unsigned int rsize;
403
404 /* start with specified rsize, or default */
405 rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
406 rsize = min_t(unsigned int, rsize, server->max_read);
407#ifdef CONFIG_CIFS_SMB_DIRECT
408 if (server->rdma) {
409 if (server->sign)
410 rsize = min_t(unsigned int,
411 rsize, server->smbd_conn->max_fragmented_recv_size);
412 else
413 rsize = min_t(unsigned int,
414 rsize, server->smbd_conn->max_readwrite_size);
415 }
416#endif
417
418 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
419 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
420
421 return rsize;
422}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200423
/*
 * Parse the FSCTL_QUERY_NETWORK_INTERFACE_INFO response buffer into a
 * newly kcalloc'd array of struct cifs_server_iface. On success the
 * caller owns *iface_list (to be kfree'd); on failure *iface_list is
 * NULL and *iface_count is 0.
 *
 * The wire format is a chain of variable-size records linked by the
 * Next field (byte offset to the following record, 0 terminates).
 */
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
			size_t buf_len,
			struct cifs_server_iface **iface_list,
			size_t *iface_count)
{
	struct network_interface_info_ioctl_rsp *p;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	struct iface_info_ipv4 *p4;
	struct iface_info_ipv6 *p6;
	struct cifs_server_iface *info;
	ssize_t bytes_left;	/* signed so a bogus Next can't wrap the loop */
	size_t next = 0;
	int nb_iface = 0;
	int rc = 0;

	*iface_list = NULL;
	*iface_count = 0;

	/*
	 * First pass: count and sanity check
	 */

	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		nb_iface++;
		next = le32_to_cpu(p->Next);
		if (!next) {
			bytes_left -= sizeof(*p);
			break;
		}
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!nb_iface) {
		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	/* leftover bytes or a dangling Next indicate a truncated buffer */
	if (bytes_left || p->Next)
		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);


	/*
	 * Second pass: extract info to internal structure
	 */

	*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
	if (!*iface_list) {
		rc = -ENOMEM;
		goto out;
	}

	info = *iface_list;
	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		info->speed = le64_to_cpu(p->LinkSpeed);
		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);

		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
			 le32_to_cpu(p->Capability));

		switch (p->Family) {
		/*
		 * The kernel and wire socket structures have the same
		 * layout and use network byte order but make the
		 * conversion explicit in case either one changes.
		 */
		case INTERNETWORK:
			addr4 = (struct sockaddr_in *)&info->sockaddr;
			p4 = (struct iface_info_ipv4 *)p->Buffer;
			addr4->sin_family = AF_INET;
			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
			addr4->sin_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
				 &addr4->sin_addr);
			break;
		case INTERNETWORKV6:
			addr6 = (struct sockaddr_in6 *)&info->sockaddr;
			p6 = (struct iface_info_ipv6 *)p->Buffer;
			addr6->sin6_family = AF_INET6;
			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
			addr6->sin6_flowinfo = 0;
			addr6->sin6_scope_id = 0;
			addr6->sin6_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
				 &addr6->sin6_addr);
			break;
		default:
			/* unknown family: record is skipped, not an error */
			cifs_dbg(VFS,
				 "%s: skipping unsupported socket family\n",
				 __func__);
			goto next_iface;
		}

		(*iface_count)++;
		info++;
next_iface:
		next = le32_to_cpu(p->Next);
		if (!next)
			break;
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	/* all records were unsupported families */
	if (!*iface_count) {
		rc = -EINVAL;
		goto out;
	}

out:
	if (rc) {
		kfree(*iface_list);
		*iface_count = 0;
		*iface_list = NULL;
	}
	return rc;
}
556
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200557static int compare_iface(const void *ia, const void *ib)
558{
559 const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
560 const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
561
562 return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
563}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200564
/*
 * Query the server's network interfaces via
 * FSCTL_QUERY_NETWORK_INTERFACE_INFO and install the parsed, speed-sorted
 * list on the session (replacing any previous list). A server that does
 * not support the ioctl is not treated as a hard error.
 */
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	/* sort interfaces from fastest to slowest */
	sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);

	/* swap in the new list under the lock; old list is freed here */
	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500607
/*
 * kref release callback for the cached root handle: close the handle on
 * the server and mark all cached state invalid. Runs with the cfid's
 * fid_mutex held by the caller of kref_put().
 */
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
		cfid->has_lease = false;
	}
}
623
/* Drop one reference on the cached root handle, closing it at zero. */
void close_shroot(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}
630
/* Drop the lease's reference on the cached root; caller holds fid_mutex. */
void close_shroot_lease_locked(struct cached_fid *cfid)
{
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}
638
/* Like close_shroot_lease_locked() but acquires fid_mutex itself. */
void close_shroot_lease(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	close_shroot_lease_locked(cfid);
	mutex_unlock(&cfid->fid_mutex);
}
645
/* Work item run when the server breaks the lease on the cached root. */
void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, lease_break);

	close_shroot_lease(cfid);
}
654
/*
 * Open the directory at the root of a share
 *
 * Returns (in *pfid) a referenced, cached handle to the share root,
 * opening it with a compounded create+query_info if no valid cached
 * handle exists yet. Callers release their reference via close_shroot().
 */
int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;

	/* fast path: hand out the existing cached handle */
	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* compound a FILE_ALL_INFORMATION query on the just-opened handle */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	/* install the new handle as the cached root (refcount = 1) */
	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/* extra reference held by the lease itself */
		kref_get(&tcon->crfid.refcount);
		tcon->crfid.has_lease = true;
		smb2_parse_contexts(server, o_rsp,
				&oparms.fid->epoch,
				oparms.fid->lease_key, &oplock, NULL);
	} else
		goto oshr_exit;

	/* opportunistically cache the FILE_ALL_INFORMATION reply too */
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = true;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
820
Steve French34f62642013-10-09 02:07:00 -0500821static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200822smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
823 struct cifs_sb_info *cifs_sb)
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500824{
825 int rc;
826 __le16 srch_path = 0; /* Null - open root of share */
827 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
828 struct cifs_open_parms oparms;
829 struct cifs_fid fid;
Steve French3d4ef9a2018-04-25 22:19:09 -0500830 bool no_cached_open = tcon->nohandlecache;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500831
832 oparms.tcon = tcon;
833 oparms.desired_access = FILE_READ_ATTRIBUTES;
834 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200835 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500836 oparms.fid = &fid;
837 oparms.reconnect = false;
838
Steve French3d4ef9a2018-04-25 22:19:09 -0500839 if (no_cached_open)
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000840 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
841 NULL);
Steve French3d4ef9a2018-04-25 22:19:09 -0500842 else
Amir Goldstein0f060932020-02-03 21:46:43 +0200843 rc = open_shroot(xid, tcon, cifs_sb, &fid);
Steve French3d4ef9a2018-04-25 22:19:09 -0500844
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500845 if (rc)
846 return;
847
Steve Frenchc481e9f2013-10-14 01:21:53 -0500848 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500849
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500850 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
851 FS_ATTRIBUTE_INFORMATION);
852 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
853 FS_DEVICE_INFORMATION);
854 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500855 FS_VOLUME_INFORMATION);
856 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500857 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Steve French3d4ef9a2018-04-25 22:19:09 -0500858 if (no_cached_open)
859 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000860 else
861 close_shroot(&tcon->crfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500862}
863
864static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200865smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
866 struct cifs_sb_info *cifs_sb)
Steve French34f62642013-10-09 02:07:00 -0500867{
868 int rc;
869 __le16 srch_path = 0; /* Null - open root of share */
870 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
871 struct cifs_open_parms oparms;
872 struct cifs_fid fid;
873
874 oparms.tcon = tcon;
875 oparms.desired_access = FILE_READ_ATTRIBUTES;
876 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200877 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French34f62642013-10-09 02:07:00 -0500878 oparms.fid = &fid;
879 oparms.reconnect = false;
880
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000881 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500882 if (rc)
883 return;
884
Steven French21671142013-10-09 13:36:35 -0500885 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
886 FS_ATTRIBUTE_INFORMATION);
887 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
888 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500889 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -0500890}
891
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400892static int
893smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
894 struct cifs_sb_info *cifs_sb, const char *full_path)
895{
896 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400897 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700898 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400899 struct cifs_open_parms oparms;
900 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400901
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000902 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -0500903 return 0;
904
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400905 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
906 if (!utf16_path)
907 return -ENOMEM;
908
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400909 oparms.tcon = tcon;
910 oparms.desired_access = FILE_READ_ATTRIBUTES;
911 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200912 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400913 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400914 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400915
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000916 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400917 if (rc) {
918 kfree(utf16_path);
919 return rc;
920 }
921
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400922 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400923 kfree(utf16_path);
924 return rc;
925}
926
/*
 * Return the server-assigned unique id (inode number) for a path.
 *
 * No network round trip is needed: the IndexNumber was already fetched
 * as part of the FILE_ALL_INFO @data supplied by the caller.  Always
 * returns 0.
 */
static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}
935
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700936static int
937smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
938 struct cifs_fid *fid, FILE_ALL_INFO *data)
939{
940 int rc;
941 struct smb2_file_all_info *smb2_data;
942
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +0400943 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -0700944 GFP_KERNEL);
945 if (smb2_data == NULL)
946 return -ENOMEM;
947
948 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
949 smb2_data);
950 if (!rc)
951 move_smb2_info_to_cifs(data, smb2_data);
952 kfree(smb2_data);
953 return rc;
954}
955
Arnd Bergmann1368f152017-09-05 11:24:15 +0200956#ifdef CONFIG_CIFS_XATTR
/*
 * Translate an SMB2 FILE_FULL_EA_INFORMATION list into the form CIFS
 * xattr callers expect.
 *
 * Two modes, selected by @ea_name:
 *  - @ea_name != NULL (getxattr): copy that attribute's value into @dst
 *    and return its length; return the length without copying if
 *    @dst_size is 0; -ERANGE if @dst is too small; -ENODATA if the
 *    name is not in the list.
 *  - @ea_name == NULL (listxattr): emit each name as "user.<name>\0"
 *    into @dst and return the total bytes produced; if the caller
 *    passed @dst_size == 0, only compute and return the required size.
 * Returns -EIO if an entry claims to extend past @src_size.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	/* remember original capacity: 0 means "size query only" below */
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		/* name and value are packed back-to-back after the header */
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		/* a zero-length name terminates the list */
		if (name_len == 0)
			break;

		/* 8 = fixed header bytes; +1 for the name's NUL terminator */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			/* getxattr: looking for one specific attribute */
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out; /* size probe only */
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* next_entry_offset == 0 marks the last entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1039
/*
 * Fetch the extended attributes of @path with a compound
 * open/query-info/close request and convert them for the caller via
 * move_smb2_ea_to_cifs().
 *
 * @ea_name: NULL for listxattr, otherwise the single attribute wanted.
 * Returns the byte count produced by move_smb2_ea_to_cifs(), 0 for a
 * listxattr on a file with no EAs, or a negative error code.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/*
	 * Output budget leaves room in the (shared) compound response
	 * buffer for the create and close replies.
	 */
	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	/* sanity-check the returned offset/length before dereferencing */
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
				  le32_to_cpu(rsp->OutputBufferLength),
				  ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1094
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001095
1096static int
1097smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1098 const char *path, const char *ea_name, const void *ea_value,
1099 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1100 struct cifs_sb_info *cifs_sb)
1101{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001102 struct cifs_ses *ses = tcon->ses;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001103 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001104 int ea_name_len = strlen(ea_name);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001105 int flags = 0;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001106 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001107 struct smb_rqst rqst[3];
1108 int resp_buftype[3];
1109 struct kvec rsp_iov[3];
1110 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1111 struct cifs_open_parms oparms;
1112 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1113 struct cifs_fid fid;
1114 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1115 unsigned int size[1];
1116 void *data[1];
1117 struct smb2_file_full_ea_info *ea = NULL;
1118 struct kvec close_iov[1];
1119 int rc;
1120
1121 if (smb3_encryption_required(tcon))
1122 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001123
1124 if (ea_name_len > 255)
1125 return -EINVAL;
1126
1127 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1128 if (!utf16_path)
1129 return -ENOMEM;
1130
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001131 memset(rqst, 0, sizeof(rqst));
1132 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1133 memset(rsp_iov, 0, sizeof(rsp_iov));
1134
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001135 if (ses->server->ops->query_all_EAs) {
1136 if (!ea_value) {
1137 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1138 ea_name, NULL, 0,
1139 cifs_sb);
1140 if (rc == -ENODATA)
1141 goto sea_exit;
1142 }
1143 }
1144
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001145 /* Open */
1146 memset(&open_iov, 0, sizeof(open_iov));
1147 rqst[0].rq_iov = open_iov;
1148 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1149
1150 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001151 oparms.tcon = tcon;
1152 oparms.desired_access = FILE_WRITE_EA;
1153 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001154 oparms.create_options = cifs_create_options(cifs_sb, 0);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001155 oparms.fid = &fid;
1156 oparms.reconnect = false;
1157
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001158 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
1159 if (rc)
1160 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001161 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001162
1163
1164 /* Set Info */
1165 memset(&si_iov, 0, sizeof(si_iov));
1166 rqst[1].rq_iov = si_iov;
1167 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001168
1169 len = sizeof(ea) + ea_name_len + ea_value_len + 1;
1170 ea = kzalloc(len, GFP_KERNEL);
1171 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001172 rc = -ENOMEM;
1173 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001174 }
1175
1176 ea->ea_name_length = ea_name_len;
1177 ea->ea_value_length = cpu_to_le16(ea_value_len);
1178 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1179 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1180
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001181 size[0] = len;
1182 data[0] = ea;
1183
1184 rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
1185 COMPOUND_FID, current->tgid,
1186 FILE_FULL_EA_INFORMATION,
1187 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001188 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001189 smb2_set_related(&rqst[1]);
1190
1191
1192 /* Close */
1193 memset(&close_iov, 0, sizeof(close_iov));
1194 rqst[2].rq_iov = close_iov;
1195 rqst[2].rq_nvec = 1;
Steve French43f8a6a2019-12-02 21:46:54 -06001196 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001197 smb2_set_related(&rqst[2]);
1198
1199 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1200 resp_buftype, rsp_iov);
Steve Frenchd2f15422019-09-22 00:55:46 -05001201 /* no need to bump num_remote_opens because handle immediately closed */
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001202
1203 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001204 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001205 kfree(utf16_path);
1206 SMB2_open_free(&rqst[0]);
1207 SMB2_set_info_free(&rqst[1]);
1208 SMB2_close_free(&rqst[2]);
1209 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1210 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1211 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001212 return rc;
1213}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001214#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001215
/*
 * Report whether SMB2 echo (keepalive) requests may currently be sent
 * on this connection, as tracked by server->echoes.
 */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1221
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001222static void
1223smb2_clear_stats(struct cifs_tcon *tcon)
1224{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001225 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001226
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001227 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1228 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1229 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1230 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001231}
1232
/*
 * Dump share capability bits, sector-info flags, and per-share fields
 * for /proc/fs/cifs/DebugData style output.
 *
 * NOTE(review): the SSINFO_FLAGS_* sector flags are printed under the
 * "Share Capabilities:" heading even though they come from the sector
 * size query, and " None" can be followed by sector flags — output
 * quirks preserved intentionally.
 */
static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}
1265
1266static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001267smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1268{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001269 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1270 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001271
1272 /*
1273 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1274 * totals (requests sent) since those SMBs are per-session not per tcon
1275 */
Steve French52ce1ac2018-07-31 01:46:47 -05001276 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1277 (long long)(tcon->bytes_read),
1278 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001279 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1280 atomic_read(&tcon->num_local_opens),
1281 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001282 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001283 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1284 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001285 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001286 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1287 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001288 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001289 atomic_read(&sent[SMB2_CREATE_HE]),
1290 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001291 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001292 atomic_read(&sent[SMB2_CLOSE_HE]),
1293 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001294 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001295 atomic_read(&sent[SMB2_FLUSH_HE]),
1296 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001297 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001298 atomic_read(&sent[SMB2_READ_HE]),
1299 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001300 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001301 atomic_read(&sent[SMB2_WRITE_HE]),
1302 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001303 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001304 atomic_read(&sent[SMB2_LOCK_HE]),
1305 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001306 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001307 atomic_read(&sent[SMB2_IOCTL_HE]),
1308 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001309 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001310 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1311 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001312 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001313 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1314 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001315 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001316 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1317 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001318 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001319 atomic_read(&sent[SMB2_SET_INFO_HE]),
1320 atomic_read(&failed[SMB2_SET_INFO_HE]));
1321 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1322 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1323 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001324}
1325
/*
 * Record a freshly opened handle on the cifsFileInfo: copy the
 * persistent/volatile id pair (and mid when debugging), apply the
 * granted oplock level to the inode's caching state, and save the
 * create GUID used for durable/persistent handle reconnect.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	/* dialect-specific hook translates the oplock/lease to cache flags */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	/* byte-range locks may be cached locally only with a write cache */
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1342
/* Close the server handle identified by @fid (persistent/volatile pair) */
static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1349
/*
 * Close an open file and opportunistically refresh the cached inode
 * attributes from the network-open info the server returns in the
 * close response, avoiding a separate query.
 *
 * On close failure the cached attributes are simply left untouched.
 */
static void
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
			  cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	CIFS_I(inode)->time = jiffies;	/* mark attribute cache as fresh */

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);

	/*
	 * i_blocks is not related to (i_size / i_blksize),
	 * but instead 512 byte (2**9) size is required for
	 * calculating num blocks.
	 */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
}
1388
/*
 * Ask the server for a resume key (FSCTL_SRV_REQUEST_RESUME_KEY) for
 * the given open file and copy it into @pcchunk->SourceKey, as needed
 * before issuing server-side copy-chunk requests.
 *
 * Returns 0 on success, -EINVAL for a short/invalid reply, or the
 * ioctl error.
 */
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	/* kfree(NULL) is safe on the early-error path */
	kfree(res_key);
	return rc;
}
1418
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001419static int
1420smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001421 struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02001422 struct cifs_sb_info *cifs_sb,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001423 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001424 unsigned long p)
1425{
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001426 struct cifs_ses *ses = tcon->ses;
1427 char __user *arg = (char __user *)p;
1428 struct smb_query_info qi;
1429 struct smb_query_info __user *pqi;
1430 int rc = 0;
1431 int flags = 0;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001432 struct smb2_query_info_rsp *qi_rsp = NULL;
1433 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001434 void *buffer = NULL;
1435 struct smb_rqst rqst[3];
1436 int resp_buftype[3];
1437 struct kvec rsp_iov[3];
1438 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1439 struct cifs_open_parms oparms;
1440 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1441 struct cifs_fid fid;
1442 struct kvec qi_iov[1];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001443 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001444 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001445 struct kvec close_iov[1];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001446 unsigned int size[2];
1447 void *data[2];
Amir Goldstein0f060932020-02-03 21:46:43 +02001448 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001449
1450 memset(rqst, 0, sizeof(rqst));
1451 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1452 memset(rsp_iov, 0, sizeof(rsp_iov));
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001453
1454 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
1455 return -EFAULT;
1456
1457 if (qi.output_buffer_length > 1024)
1458 return -EINVAL;
1459
1460 if (!ses || !(ses->server))
1461 return -EIO;
1462
1463 if (smb3_encryption_required(tcon))
1464 flags |= CIFS_TRANSFORM_REQ;
1465
Markus Elfringcfaa1182019-11-05 21:30:25 +01001466 buffer = memdup_user(arg + sizeof(struct smb_query_info),
1467 qi.output_buffer_length);
1468 if (IS_ERR(buffer))
1469 return PTR_ERR(buffer);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001470
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001471 /* Open */
1472 memset(&open_iov, 0, sizeof(open_iov));
1473 rqst[0].rq_iov = open_iov;
1474 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001475
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001476 memset(&oparms, 0, sizeof(oparms));
1477 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001478 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001479 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001480 oparms.fid = &fid;
1481 oparms.reconnect = false;
1482
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001483 if (qi.flags & PASSTHRU_FSCTL) {
1484 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1485 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1486 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001487 break;
1488 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1489 oparms.desired_access = GENERIC_ALL;
1490 break;
1491 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1492 oparms.desired_access = GENERIC_READ;
1493 break;
1494 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1495 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001496 break;
1497 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001498 } else if (qi.flags & PASSTHRU_SET_INFO) {
1499 oparms.desired_access = GENERIC_WRITE;
1500 } else {
1501 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001502 }
1503
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001504 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
1505 if (rc)
1506 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001507 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001508
1509 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001510 if (qi.flags & PASSTHRU_FSCTL) {
1511 /* Can eventually relax perm check since server enforces too */
1512 if (!capable(CAP_SYS_ADMIN))
1513 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001514 else {
1515 memset(&io_iov, 0, sizeof(io_iov));
1516 rqst[1].rq_iov = io_iov;
1517 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1518
1519 rc = SMB2_ioctl_init(tcon, &rqst[1],
1520 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001521 qi.info_type, true, buffer,
1522 qi.output_buffer_length,
Ronnie Sahlberg731b82b2020-01-08 13:08:07 +10001523 CIFSMaxBufSize -
1524 MAX_SMB2_CREATE_RESPONSE_SIZE -
1525 MAX_SMB2_CLOSE_RESPONSE_SIZE);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001526 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001527 } else if (qi.flags == PASSTHRU_SET_INFO) {
1528 /* Can eventually relax perm check since server enforces too */
1529 if (!capable(CAP_SYS_ADMIN))
1530 rc = -EPERM;
1531 else {
1532 memset(&si_iov, 0, sizeof(si_iov));
1533 rqst[1].rq_iov = si_iov;
1534 rqst[1].rq_nvec = 1;
1535
1536 size[0] = 8;
1537 data[0] = buffer;
1538
1539 rc = SMB2_set_info_init(tcon, &rqst[1],
1540 COMPOUND_FID, COMPOUND_FID,
1541 current->tgid,
1542 FILE_END_OF_FILE_INFORMATION,
1543 SMB2_O_INFO_FILE, 0, data, size);
1544 }
Steve French31ba4332019-03-13 02:40:07 -05001545 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1546 memset(&qi_iov, 0, sizeof(qi_iov));
1547 rqst[1].rq_iov = qi_iov;
1548 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001549
Steve French31ba4332019-03-13 02:40:07 -05001550 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
1551 COMPOUND_FID, qi.file_info_class,
1552 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001553 qi.input_buffer_length,
1554 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001555 } else { /* unknown flags */
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001556 cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001557 rc = -EINVAL;
1558 }
1559
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001560 if (rc)
1561 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001562 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001563 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001564
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001565 /* Close */
1566 memset(&close_iov, 0, sizeof(close_iov));
1567 rqst[2].rq_iov = close_iov;
1568 rqst[2].rq_nvec = 1;
1569
Steve French43f8a6a2019-12-02 21:46:54 -06001570 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001571 if (rc)
1572 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001573 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001574
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001575 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1576 resp_buftype, rsp_iov);
1577 if (rc)
1578 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001579
1580 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001581 if (qi.flags & PASSTHRU_FSCTL) {
1582 pqi = (struct smb_query_info __user *)arg;
1583 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1584 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1585 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001586 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001587 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1588 > rsp_iov[1].iov_len)
1589 goto e_fault;
1590
1591 if (copy_to_user(&pqi->input_buffer_length,
1592 &qi.input_buffer_length,
1593 sizeof(qi.input_buffer_length)))
1594 goto e_fault;
1595
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001596 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1597 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001598 qi.input_buffer_length))
1599 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001600 } else {
1601 pqi = (struct smb_query_info __user *)arg;
1602 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1603 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1604 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001605 if (copy_to_user(&pqi->input_buffer_length,
1606 &qi.input_buffer_length,
1607 sizeof(qi.input_buffer_length)))
1608 goto e_fault;
1609
1610 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1611 qi.input_buffer_length))
1612 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001613 }
1614
1615 iqinf_exit:
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001616 kfree(buffer);
1617 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001618 if (qi.flags & PASSTHRU_FSCTL)
1619 SMB2_ioctl_free(&rqst[1]);
1620 else
1621 SMB2_query_info_free(&rqst[1]);
1622
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001623 SMB2_close_free(&rqst[2]);
1624 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1625 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1626 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001627 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001628
1629e_fault:
1630 rc = -EFAULT;
1631 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001632}
1633
/*
 * Server-side copy of a byte range from srcfile to trgtfile using
 * FSCTL_SRV_COPYCHUNK_WRITE.  A resume key identifying the source is
 * obtained first, then chunks of at most tcon->max_bytes_chunk bytes are
 * copied in a loop.  On -EINVAL the server may be reporting smaller
 * supported chunk sizes; we shrink max_bytes_chunk once and retry.
 * Returns total bytes copied on success, negative errno on failure.
 */
static ssize_t
smb2_copychunk_range(const unsigned int xid,
			struct cifsFileInfo *srcfile,
			struct cifsFileInfo *trgtfile, u64 src_off,
			u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct copychunk_ioctl *pcchunk;
	struct copychunk_ioctl_rsp *retbuf = NULL;
	struct cifs_tcon *tcon;
	int chunks_copied = 0;
	bool chunk_sizes_updated = false;
	ssize_t bytes_written, total_bytes_written = 0;

	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);

	if (pcchunk == NULL)
		return -ENOMEM;

	cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
	/* Request a key from the server to identify the source of the copy */
	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
				  srcfile->fid.persistent_fid,
				  srcfile->fid.volatile_fid, pcchunk);

	/* Note: request_res_key sets res_key null only if rc !=0 */
	if (rc)
		goto cchunk_out;

	/* For now array only one chunk long, will make more flexible later */
	pcchunk->ChunkCount = cpu_to_le32(1);
	pcchunk->Reserved = 0;
	pcchunk->Reserved2 = 0;

	tcon = tlink_tcon(trgtfile->tlink);

	while (len > 0) {
		pcchunk->SourceOffset = cpu_to_le64(src_off);
		pcchunk->TargetOffset = cpu_to_le64(dest_off);
		pcchunk->Length =
			cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));

		/* Request server copy to target from src identified by key */
		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
			true /* is_fsctl */, (char *)pcchunk,
			sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
			(char **)&retbuf, &ret_data_len);
		if (rc == 0) {
			/* Sanity-check the response before trusting counts */
			if (ret_data_len !=
					sizeof(struct copychunk_ioctl_rsp)) {
				cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (retbuf->TotalBytesWritten == 0) {
				cifs_dbg(FYI, "no bytes copied\n");
				rc = -EIO;
				goto cchunk_out;
			}
			/*
			 * Check if server claimed to write more than we asked
			 */
			if (le32_to_cpu(retbuf->TotalBytesWritten) >
			    le32_to_cpu(pcchunk->Length)) {
				cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
				cifs_tcon_dbg(VFS, "invalid num chunks written\n");
				rc = -EIO;
				goto cchunk_out;
			}
			chunks_copied++;

			/* Advance both offsets by what the server wrote */
			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
			src_off += bytes_written;
			dest_off += bytes_written;
			len -= bytes_written;
			total_bytes_written += bytes_written;

			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				bytes_written);
		} else if (rc == -EINVAL) {
			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
				goto cchunk_out;

			cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				le32_to_cpu(retbuf->TotalBytesWritten));

			/*
			 * Check if this is the first request using these sizes,
			 * (ie check if copy succeed once with original sizes
			 * and check if the server gave us different sizes after
			 * we already updated max sizes on previous request).
			 * if not then why is the server returning an error now
			 */
			if ((chunks_copied != 0) || chunk_sizes_updated)
				goto cchunk_out;

			/* Check that server is not asking us to grow size */
			if (le32_to_cpu(retbuf->ChunkBytesWritten) <
					tcon->max_bytes_chunk)
				tcon->max_bytes_chunk =
					le32_to_cpu(retbuf->ChunkBytesWritten);
			else
				goto cchunk_out; /* server gave us bogus size */

			/* No need to change MaxChunks since already set to 1 */
			chunk_sizes_updated = true;
		} else
			goto cchunk_out;
	}

cchunk_out:
	kfree(pcchunk);
	kfree(retbuf);
	if (rc)
		return rc;
	else
		return total_bytes_written;
}
1762
1763static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001764smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1765 struct cifs_fid *fid)
1766{
1767 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1768}
1769
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001770static unsigned int
1771smb2_read_data_offset(char *buf)
1772{
1773 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001774
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001775 return rsp->DataOffset;
1776}
1777
1778static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001779smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001780{
1781 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001782
1783 if (in_remaining)
1784 return le32_to_cpu(rsp->DataRemaining);
1785
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001786 return le32_to_cpu(rsp->DataLength);
1787}
1788
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001789
1790static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001791smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001792 struct cifs_io_parms *parms, unsigned int *bytes_read,
1793 char **buf, int *buf_type)
1794{
Steve Frenchdb8b6312014-09-22 05:13:55 -05001795 parms->persistent_fid = pfid->persistent_fid;
1796 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001797 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1798}
1799
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001800static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001801smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001802 struct cifs_io_parms *parms, unsigned int *written,
1803 struct kvec *iov, unsigned long nr_segs)
1804{
1805
Steve Frenchdb8b6312014-09-22 05:13:55 -05001806 parms->persistent_fid = pfid->persistent_fid;
1807 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001808 return SMB2_write(xid, parms, written, iov, nr_segs);
1809}
1810
/*
 * Set or clear the SPARSE_FILE attribute based on value passed in setsparse.
 * Returns true when the cached attribute already matches the request or the
 * server accepted FSCTL_SET_SPARSE; returns false if the fsctl failed, in
 * which case sparse support is marked broken for the whole share.
 */
static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
			    struct cifsFileInfo *cfile, struct inode *inode,
			    __u8 setsparse)
{
	struct cifsInodeInfo *cifsi;
	int rc;

	cifsi = CIFS_I(inode);

	/* if file already sparse don't bother setting sparse again */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
		return true; /* already sparse */

	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
		return true; /* already not sparse */

	/*
	 * Can't check for sparse support on share the usual way via the
	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
	 * since Samba server doesn't set the flag on the share, yet
	 * supports the set sparse FSCTL and returns sparse correctly
	 * in the file attributes. If we fail setting sparse though we
	 * mark that server does not support sparse files for this share
	 * to avoid repeatedly sending the unsupported fsctl to server
	 * if the file is repeatedly extended.
	 */
	if (tcon->broken_sparse_sup)
		return false;

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
			true /* is_fctl */,
			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
	if (rc) {
		tcon->broken_sparse_sup = true;
		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
		return false;
	}

	/* Keep the cached attribute bits in sync with the server. */
	if (setsparse)
		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
	else
		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);

	return true;
}
1857
/*
 * Set the file's end-of-file marker to @size via SMB2_set_eof.  When the
 * file is being extended well beyond its current size (and this is not an
 * allocation-size change), first try to make it sparse so the server does
 * not materialize the hole.
 */
static int
smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
{
	__le64 eof = cpu_to_le64(size);
	struct inode *inode;

	/*
	 * If extending file more than one page make sparse. Many Linux fs
	 * make files sparse by default when extending via ftruncate
	 */
	inode = d_inode(cfile->dentry);

	if (!set_alloc && (size > inode->i_size + 8192)) {
		__u8 set_sparse = 1;

		/* whether set sparse succeeds or not, extend the file */
		smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
	}

	return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
			    cfile->fid.volatile_fid, cfile->pid, &eof);
}
1881
/*
 * Clone @len bytes from srcfile at @src_off into trgtfile at @dest_off
 * using FSCTL_DUPLICATE_EXTENTS_TO_FILE (block-refcounting clone, e.g. on
 * ReFS).  The target is extended first so the cloned range fits.
 */
static int
smb2_duplicate_extents(const unsigned int xid,
		struct cifsFileInfo *srcfile,
		struct cifsFileInfo *trgtfile, u64 src_off,
		u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* servers advertise duplicate extent support with this flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);

	/* Grow the target first so the cloned extents have somewhere to land */
	rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
	if (rc)
		goto duplicate_extents_out;

	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			true /* is_fsctl */,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	/* NOTE(review): a successful response is expected to carry no data */
	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	return rc;
}
Steve French02b16662015-06-27 21:18:36 -07001925
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001926static int
Steve French64a5cfa2013-10-14 15:31:32 -05001927smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1928 struct cifsFileInfo *cfile)
1929{
1930 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1931 cfile->fid.volatile_fid);
1932}
1933
/*
 * Send FSCTL_SET_INTEGRITY_INFORMATION for the open file with the checksum
 * algorithm left unchanged (CHECKSUM_TYPE_UNCHANGED) and no flags set.
 * Returns the result of the ioctl.
 */
static int
smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct fsctl_set_integrity_information_req integr_info;
	unsigned int ret_data_len;

	integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
	integr_info.Flags = 0;
	integr_info.Reserved = 0;

	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SET_INTEGRITY_INFORMATION,
			true /* is_fsctl */,
			(char *)&integr_info,
			sizeof(struct fsctl_set_integrity_information_req),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

}
1955
Steve Frenche02789a2018-08-09 14:33:12 -05001956/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1957#define GMT_TOKEN_SIZE 50
1958
Steve French153322f2019-03-28 22:32:49 -05001959#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1960
/*
 * Input buffer contains (empty) struct smb_snapshot array with size filled in
 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
 */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
		    struct cifsFileInfo *cfile, void __user *ioc_buf)
{
	char *retbuf = NULL;
	unsigned int ret_data_len = 0;
	int rc;
	u32 max_response_size;
	struct smb_snapshot_array snapshot_in;

	/*
	 * On the first query to enumerate the list of snapshots available
	 * for this volume the buffer begins with 0 (number of snapshots
	 * which can be returned is zero since at that point we do not know
	 * how big the buffer needs to be). On the second query,
	 * it (ret_data_len) is set to number of snapshots so we can
	 * know to set the maximum response size larger (see below).
	 */
	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
		return -EFAULT;

	/*
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes.
	 */
	if (ret_data_len == 0)
		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
	else
		max_response_size = CIFSMaxBufSize;

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			true /* is_fsctl */,
			NULL, 0 /* no input data */, max_response_size,
			(char **)&retbuf,
			&ret_data_len);
	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
			rc, ret_data_len);
	if (rc)
		return rc;

	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
		/* Fixup buffer */
		if (copy_from_user(&snapshot_in, ioc_buf,
		    sizeof(struct smb_snapshot_array))) {
			rc = -EFAULT;
			kfree(retbuf);
			return rc;
		}

		/*
		 * Check for min size, ie not large enough to fit even one GMT
		 * token (snapshot). On the first ioctl some users may pass in
		 * smaller size (or zero) to simply get the size of the array
		 * so the user space caller can allocate sufficient memory
		 * and retry the ioctl again with larger array size sufficient
		 * to hold all of the snapshot GMT tokens on the second try.
		 */
		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
			ret_data_len = sizeof(struct smb_snapshot_array);

		/*
		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
		 * the snapshot array (of 50 byte GMT tokens) each
		 * representing an available previous version of the data
		 */
		if (ret_data_len > (snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array)))
			ret_data_len = snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array);

		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
			rc = -EFAULT;
	}

	kfree(retbuf);
	return rc;
}
2047
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002048
2049
2050static int
2051smb3_notify(const unsigned int xid, struct file *pfile,
2052 void __user *ioc_buf)
2053{
2054 struct smb3_notify notify;
2055 struct dentry *dentry = pfile->f_path.dentry;
2056 struct inode *inode = file_inode(pfile);
2057 struct cifs_sb_info *cifs_sb;
2058 struct cifs_open_parms oparms;
2059 struct cifs_fid fid;
2060 struct cifs_tcon *tcon;
2061 unsigned char *path = NULL;
2062 __le16 *utf16_path = NULL;
2063 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2064 int rc = 0;
2065
2066 path = build_path_from_dentry(dentry);
2067 if (path == NULL)
2068 return -ENOMEM;
2069
2070 cifs_sb = CIFS_SB(inode->i_sb);
2071
2072 utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
2073 if (utf16_path == NULL) {
2074 rc = -ENOMEM;
2075 goto notify_exit;
2076 }
2077
2078 if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
2079 rc = -EFAULT;
2080 goto notify_exit;
2081 }
2082
2083 tcon = cifs_sb_master_tcon(cifs_sb);
2084 oparms.tcon = tcon;
2085 oparms.desired_access = FILE_READ_ATTRIBUTES;
2086 oparms.disposition = FILE_OPEN;
2087 oparms.create_options = cifs_create_options(cifs_sb, 0);
2088 oparms.fid = &fid;
2089 oparms.reconnect = false;
2090
2091 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
2092 if (rc)
2093 goto notify_exit;
2094
2095 rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
2096 notify.watch_tree, notify.completion_filter);
2097
2098 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2099
2100 cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
2101
2102notify_exit:
2103 kfree(path);
2104 kfree(utf16_path);
2105 return rc;
2106}
2107
/*
 * Begin a directory enumeration by sending a compound CREATE (open the
 * directory) + QUERY_DIRECTORY request.  On success the open handle is
 * returned in @fid (caller closes it via smb2_close_dir) and the first
 * batch of entries is parsed into @srch_inf, which takes ownership of the
 * response buffer (resp_buftype[1] is cleared so it is not freed here).
 */
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *path, struct cifs_sb_info *cifs_sb,
		     struct cifs_fid *fid, __u16 search_flags,
		     struct cifs_search_info *srch_inf)
{
	__le16 *utf16_path;
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	int resp_buftype[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	int rc, flags = 0;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct smb2_query_directory_rsp *qd_rsp = NULL;
	struct smb2_create_rsp *op_rsp = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qdf_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query directory */
	srch_inf->entries_in_buffer = 0;
	srch_inf->index_of_last_entry = 2;

	memset(&qd_iov, 0, sizeof(qd_iov));
	rqst[1].rq_iov = qd_iov;
	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	/* COMPOUND_FID: use the handle created by the preceding open */
	rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
				       COMPOUND_FID, COMPOUND_FID,
				       0, srch_inf->info_level);
	if (rc)
		goto qdf_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
				resp_buftype, rsp_iov);

	/* If the open failed there is nothing to do */
	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
		goto qdf_free;
	}
	fid->persistent_fid = op_rsp->PersistentFileId;
	fid->volatile_fid = op_rsp->VolatileFileId;

	/* Anything else than ENODATA means a genuine error */
	if (rc && rc != -ENODATA) {
		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
		trace_smb3_query_dir_err(xid, fid->persistent_fid,
					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}

	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
	if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
		/* Empty directory: success with endOfSearch set */
		trace_smb3_query_dir_done(xid, fid->persistent_fid,
					  tcon->tid, tcon->ses->Suid, 0, 0);
		srch_inf->endOfSearch = true;
		rc = 0;
		goto qdf_free;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
					 tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}
	/* srch_inf now owns the query response buffer; don't free it below */
	resp_buftype[1] = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
				  tcon->ses->Suid, 0, srch_inf->entries_in_buffer);

 qdf_free:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_query_directory_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
2220
2221static int
2222smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2223 struct cifs_fid *fid, __u16 search_flags,
2224 struct cifs_search_info *srch_inf)
2225{
2226 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2227 fid->volatile_fid, 0, srch_inf);
2228}
2229
2230static int
2231smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2232 struct cifs_fid *fid)
2233{
2234 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2235}
2236
/*
 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
 * the number of credits and return true. Otherwise - return false.
 */
static bool
smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	if (shdr->Status != STATUS_PENDING)
		return false;

	/*
	 * An interim response can grant credits; add them under req_lock
	 * and wake any senders blocked waiting for credits.
	 */
	if (shdr->CreditRequest) {
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
	}

	return true;
}
2258
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002259static bool
2260smb2_is_session_expired(char *buf)
2261{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002262 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002263
Mark Symsd81243c2018-05-24 09:47:31 +01002264 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2265 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002266 return false;
2267
Steve Frenche68a9322018-07-30 14:23:58 -05002268 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2269 le16_to_cpu(shdr->Command),
2270 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002271 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002272
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002273 return true;
2274}
2275
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002276static int
2277smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2278 struct cifsInodeInfo *cinode)
2279{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002280 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2281 return SMB2_lease_break(0, tcon, cinode->lease_key,
2282 smb2_get_lease_state(cinode));
2283
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002284 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2285 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002286 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002287}
2288
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002289void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002290smb2_set_related(struct smb_rqst *rqst)
2291{
2292 struct smb2_sync_hdr *shdr;
2293
2294 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002295 if (shdr == NULL) {
2296 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2297 return;
2298 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002299 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2300}
2301
/* Zero bytes used to pad compounded requests to an 8-byte boundary */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2303
/*
 * Pad @rqst out to an 8-byte boundary and record the resulting total
 * length in the header's NextCommand field so that another request can
 * be compounded after this one.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 *
		 * NOTE(review): this assumes rq_iov[0].iov_base was allocated
		 * with enough room for all subsequent iovs plus padding --
		 * confirm against the callers that build these requests.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2360
/*
 * Passes the query info response back to the caller on success.
 * Caller need to free this with free_rsp_buf().
 *
 * Sends a single compound of three requests - create(open of
 * @utf16_path with @desired_access), query info (@class/@type,
 * @output_len response buffer), close - and on success hands the
 * query info response iov to the caller via @rsp/@buftype.
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query info, on the file id produced by the preceding open */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		/* on error the query info response is not handed to caller */
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto qic_exit;
	}
	/* success: ownership of rsp_iov[1] passes to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2452
2453static int
2454smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002455 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002456{
2457 struct smb2_query_info_rsp *rsp;
2458 struct smb2_fs_full_size_info *info = NULL;
2459 __le16 utf16_path = 0; /* Null - open root of share */
2460 struct kvec rsp_iov = {NULL, 0};
2461 int buftype = CIFS_NO_BUFFER;
2462 int rc;
2463
2464
2465 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2466 FILE_READ_ATTRIBUTES,
2467 FS_FULL_SIZE_INFORMATION,
2468 SMB2_O_INFO_FILESYSTEM,
2469 sizeof(struct smb2_fs_full_size_info),
Steve French87f93d82020-02-04 13:02:59 -06002470 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002471 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002472 goto qfs_exit;
2473
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002474 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002475 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002476 info = (struct smb2_fs_full_size_info *)(
2477 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2478 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2479 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002480 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002481 sizeof(struct smb2_fs_full_size_info));
2482 if (!rc)
2483 smb2_copy_fs_info_to_kstatfs(info, buf);
2484
2485qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002486 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002487 return rc;
2488}
2489
Steve French2d304212018-06-24 23:28:12 -05002490static int
2491smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002492 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Steve French2d304212018-06-24 23:28:12 -05002493{
2494 int rc;
2495 __le16 srch_path = 0; /* Null - open root of share */
2496 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2497 struct cifs_open_parms oparms;
2498 struct cifs_fid fid;
2499
2500 if (!tcon->posix_extensions)
Amir Goldstein0f060932020-02-03 21:46:43 +02002501 return smb2_queryfs(xid, tcon, cifs_sb, buf);
Steve French2d304212018-06-24 23:28:12 -05002502
2503 oparms.tcon = tcon;
2504 oparms.desired_access = FILE_READ_ATTRIBUTES;
2505 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002506 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French2d304212018-06-24 23:28:12 -05002507 oparms.fid = &fid;
2508 oparms.reconnect = false;
2509
2510 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2511 if (rc)
2512 return rc;
2513
2514 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2515 fid.volatile_fid, buf);
2516 buf->f_type = SMB2_MAGIC_NUMBER;
2517 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2518 return rc;
2519}
Steve French2d304212018-06-24 23:28:12 -05002520
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002521static bool
2522smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2523{
2524 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2525 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2526}
2527
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002528static int
2529smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2530 __u64 length, __u32 type, int lock, int unlock, bool wait)
2531{
2532 if (unlock && !lock)
2533 type = SMB2_LOCKFLAG_UNLOCK;
2534 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2535 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2536 current->tgid, length, offset, type, wait);
2537}
2538
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002539static void
2540smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2541{
2542 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2543}
2544
2545static void
2546smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2547{
2548 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2549}
2550
/* Generate a fresh random lease key in @fid for a new open. */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2556
/*
 * Resolve a DFS referral for @search_name on session @ses.
 *
 * Builds an FSCTL_DFS_GET_REFERRALS request, sends it over the IPC
 * tcon (or, failing that, any tcon of the session, temporarily
 * refcounted), and parses the referral response into @target_nodes /
 * @num_of_nodes, which the caller owns and must free.
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		/* pin the borrowed tcon; dropped in the out: path below */
		if (tcon)
			tcon->tc_count++;
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	do {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
	} while (rc == -EAGAIN);

	if (rc) {
		if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
			cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002651
2652static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002653parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2654 u32 plen, char **target_path,
2655 struct cifs_sb_info *cifs_sb)
2656{
2657 unsigned int len;
2658
2659 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2660 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2661
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002662 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2663 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2664 le64_to_cpu(symlink_buf->InodeType));
2665 return -EOPNOTSUPP;
2666 }
2667
2668 *target_path = cifs_strndup_from_utf16(
2669 symlink_buf->PathBuffer,
2670 len, true, cifs_sb->local_nls);
2671 if (!(*target_path))
2672 return -ENOMEM;
2673
2674 convert_delimiter(*target_path, '/');
2675 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2676
2677 return 0;
2678}
2679
/*
 * Parse a Windows IO_REPARSE_TAG_SYMLINK reparse buffer and extract
 * the substitute-name target into *target_path (allocated; caller
 * frees). See MS-FSCC 2.1.2.4.
 */
static int
parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
		      u32 plen, char **target_path,
		      struct cifs_sb_info *cifs_sb)
{
	unsigned int sub_len;
	unsigned int sub_offset;

	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */

	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
	/*
	 * 20 is the offset of PathBuffer from the start of the reparse
	 * buffer (tag 4 + data length 2 + reserved 2 + four u16 name
	 * offset/length fields + flags 4); offsets are relative to
	 * PathBuffer, so the whole name must fit inside plen.
	 */
	if (sub_offset + 20 > plen ||
	    sub_offset + sub_len + 20 > plen) {
		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
		return -EIO;
	}

	*target_path = cifs_strndup_from_utf16(
				symlink_buf->PathBuffer + sub_offset,
				sub_len, true, cifs_sb->local_nls);
	if (!(*target_path))
		return -ENOMEM;

	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	return 0;
}
2709
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002710static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002711parse_reparse_point(struct reparse_data_buffer *buf,
2712 u32 plen, char **target_path,
2713 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002714{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002715 if (plen < sizeof(struct reparse_data_buffer)) {
2716 cifs_dbg(VFS, "reparse buffer is too small. Must be "
2717 "at least 8 bytes but was %d\n", plen);
2718 return -EIO;
2719 }
2720
2721 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2722 sizeof(struct reparse_data_buffer)) {
2723 cifs_dbg(VFS, "srv returned invalid reparse buf "
2724 "length: %d\n", plen);
2725 return -EIO;
2726 }
2727
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002728 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002729 switch (le32_to_cpu(buf->ReparseTag)) {
2730 case IO_REPARSE_TAG_NFS:
2731 return parse_reparse_posix(
2732 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002733 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002734 case IO_REPARSE_TAG_SYMLINK:
2735 return parse_reparse_symlink(
2736 (struct reparse_symlink_data_buffer *)buf,
2737 plen, target_path, cifs_sb);
2738 default:
2739 cifs_dbg(VFS, "srv returned unknown symlink buffer "
2740 "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
2741 return -EOPNOTSUPP;
2742 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002743}
2744
/*
 * Minimum size of an SMB2 error response that carries a symlink error
 * context: the 1-byte ErrorData placeholder of smb2_err_rsp is
 * replaced by the symlink error payload.
 */
#define SMB2_SYMLINK_STRUCT_SIZE \
	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2747
/*
 * Resolve the target of a symlink at @full_path.
 *
 * Sends a compound open + FSCTL_GET_REPARSE_POINT ioctl + close.
 * For a reparse point open (@is_reparse_point), the reparse buffer
 * from the ioctl response is parsed. Otherwise the open is expected
 * to FAIL with a symlink error context (STATUS_STOPPED_ON_SYMLINK
 * style response) from which the substitute name is extracted.
 * On success *target_path is an allocated string the caller frees.
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	/* cap the response so open + ioctl + close fit in one buffer */
	rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* keep the create response around if the open itself failed */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->sync_hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				      plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	/* otherwise we expect the open to have failed with error data */
	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	err_buf = err_iov.iov_base;
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	/* both names must lie fully inside the error response */
	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	*target_path = cifs_strndup_from_utf16(
			(char *)symlink->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

 querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2931
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002932static struct cifs_ntsd *
2933get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2934 const struct cifs_fid *cifsfid, u32 *pacllen)
2935{
2936 struct cifs_ntsd *pntsd = NULL;
2937 unsigned int xid;
2938 int rc = -EOPNOTSUPP;
2939 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2940
2941 if (IS_ERR(tlink))
2942 return ERR_CAST(tlink);
2943
2944 xid = get_xid();
2945 cifs_dbg(FYI, "trying to get acl\n");
2946
2947 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2948 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2949 free_xid(xid);
2950
2951 cifs_put_tlink(tlink);
2952
2953 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2954 if (rc)
2955 return ERR_PTR(rc);
2956 return pntsd;
2957
2958}
2959
2960static struct cifs_ntsd *
2961get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2962 const char *path, u32 *pacllen)
2963{
2964 struct cifs_ntsd *pntsd = NULL;
2965 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2966 unsigned int xid;
2967 int rc;
2968 struct cifs_tcon *tcon;
2969 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2970 struct cifs_fid fid;
2971 struct cifs_open_parms oparms;
2972 __le16 *utf16_path;
2973
2974 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2975 if (IS_ERR(tlink))
2976 return ERR_CAST(tlink);
2977
2978 tcon = tlink_tcon(tlink);
2979 xid = get_xid();
2980
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002981 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05002982 if (!utf16_path) {
2983 rc = -ENOMEM;
2984 free_xid(xid);
2985 return ERR_PTR(rc);
2986 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002987
2988 oparms.tcon = tcon;
2989 oparms.desired_access = READ_CONTROL;
2990 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002991 oparms.create_options = cifs_create_options(cifs_sb, 0);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002992 oparms.fid = &fid;
2993 oparms.reconnect = false;
2994
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10002995 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05002996 kfree(utf16_path);
2997 if (!rc) {
2998 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2999 fid.volatile_fid, (void **)&pntsd, pacllen);
3000 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3001 }
3002
3003 cifs_put_tlink(tlink);
3004 free_xid(xid);
3005
3006 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3007 if (rc)
3008 return ERR_PTR(rc);
3009 return pntsd;
3010}
3011
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003012static int
3013set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
3014 struct inode *inode, const char *path, int aclflag)
3015{
3016 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3017 unsigned int xid;
3018 int rc, access_flags = 0;
3019 struct cifs_tcon *tcon;
3020 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3021 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3022 struct cifs_fid fid;
3023 struct cifs_open_parms oparms;
3024 __le16 *utf16_path;
3025
3026 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
3027 if (IS_ERR(tlink))
3028 return PTR_ERR(tlink);
3029
3030 tcon = tlink_tcon(tlink);
3031 xid = get_xid();
3032
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003033 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
3034 access_flags = WRITE_OWNER;
3035 else
3036 access_flags = WRITE_DAC;
3037
3038 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05003039 if (!utf16_path) {
3040 rc = -ENOMEM;
3041 free_xid(xid);
3042 return rc;
3043 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003044
3045 oparms.tcon = tcon;
3046 oparms.desired_access = access_flags;
Amir Goldstein0f060932020-02-03 21:46:43 +02003047 oparms.create_options = cifs_create_options(cifs_sb, 0);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003048 oparms.disposition = FILE_OPEN;
3049 oparms.path = path;
3050 oparms.fid = &fid;
3051 oparms.reconnect = false;
3052
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +10003053 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003054 kfree(utf16_path);
3055 if (!rc) {
3056 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3057 fid.volatile_fid, pnntsd, acllen, aclflag);
3058 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3059 }
3060
3061 cifs_put_tlink(tlink);
3062 free_xid(xid);
3063 return rc;
3064}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003065
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003066/* Retrieve an ACL from the server */
3067static struct cifs_ntsd *
3068get_smb2_acl(struct cifs_sb_info *cifs_sb,
3069 struct inode *inode, const char *path,
3070 u32 *pacllen)
3071{
3072 struct cifs_ntsd *pntsd = NULL;
3073 struct cifsFileInfo *open_file = NULL;
3074
3075 if (inode)
3076 open_file = find_readable_file(CIFS_I(inode), true);
3077 if (!open_file)
3078 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
3079
3080 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
3081 cifsFileInfo_put(open_file);
3082 return pntsd;
3083}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003084
/*
 * Zero the byte range [offset, offset+len) of an open file using
 * FSCTL_SET_ZERO_DATA, optionally extending the file size.
 *
 * @keep_size: when false and the range extends past EOF, the file is
 *	       grown with a set-EOF after the zeroing succeeds.
 *
 * Returns 0 on success or a negative errno.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);


	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			/* without a read cache we cannot safely grow the file */
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* server zeroes [FileOffset, BeyondFinalZero) */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	/* NOTE(review): xid is used for tracing after free_xid - value only */
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
3148
Steve French31742c52014-08-17 08:38:47 -05003149static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3150 loff_t offset, loff_t len)
3151{
3152 struct inode *inode;
Steve French31742c52014-08-17 08:38:47 -05003153 struct cifsFileInfo *cfile = file->private_data;
3154 struct file_zero_data_information fsctl_buf;
3155 long rc;
3156 unsigned int xid;
3157 __u8 set_sparse = 1;
3158
3159 xid = get_xid();
3160
David Howells2b0143b2015-03-17 22:25:59 +00003161 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05003162
3163 /* Need to make file sparse, if not already, before freeing range. */
3164 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05003165 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
3166 rc = -EOPNOTSUPP;
3167 free_xid(xid);
3168 return rc;
3169 }
Steve French31742c52014-08-17 08:38:47 -05003170
Christoph Probsta205d502019-05-08 21:36:25 +02003171 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05003172
3173 fsctl_buf.FileOffset = cpu_to_le64(offset);
3174 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3175
3176 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3177 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01003178 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05003179 sizeof(struct file_zero_data_information),
3180 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05003181 free_xid(xid);
3182 return rc;
3183}
3184
/*
 * Handle fallocate() without ZERO_RANGE/PUNCH_HOLE flags: ensure the
 * range [off, off+len) is allocated on the server.
 *
 * @keep_size: true for FALLOC_FL_KEEP_SIZE - never grow the file.
 *
 * Returns 0 on success, -EOPNOTSUPP where we cannot safely emulate the
 * request, or another negative errno from the server calls.
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			    loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Extending the file
	 */
	if ((keep_size == false) && i_size_read(inode) < off + len) {
		/* non-sparse files are allocated on extension automatically */
		if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
			smb2_set_sparse(xid, tcon, cfile, inode, false);

		eof = cpu_to_le64(off + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
		if (rc == 0) {
			/* keep local size state in sync with the new EOF */
			cifsi->server_eof = off + len;
			cifs_setsize(inode, off + len);
			cifs_truncate_page(inode->i_mapping, inode->i_size);
			truncate_setsize(inode, off + len);
		}
		goto out;
	}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and since we are not
	 * extending then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		rc = 0;
		goto out;
	}

	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	/* whole-file case: clearing the sparse flag allocates the file */
	smb2_set_sparse(xid, tcon, cfile, inode, false);
	rc = 0;

out:
	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3269
/*
 * llseek for SEEK_HOLE/SEEK_DATA, implemented with
 * FSCTL_QUERY_ALLOCATED_RANGES; all other whence values are delegated to
 * generic_file_llseek().
 *
 * Returns the new file position via vfs_setpos(), -ENXIO when no
 * data/hole exists past @offset, or another negative errno.
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, false);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* a non-sparse file has no holes: only a virtual one at EOF */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	/* query allocated ranges from @offset up to the current size */
	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	/* we only need the first returned range, hence the small out size */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	/* -E2BIG just means more ranges exist than fit; first one is valid */
	if (rc == -E2BIG)
		rc = 0;
	if (rc)
		goto lseek_exit;

	/* no allocated range at all: offset already points into a hole */
	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	/* no allocated range at all: there is no data past offset */
	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		/* first allocated range begins the next data region */
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: if the range starts past offset, offset is in a hole */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	/* otherwise the hole starts right after the first allocated range */
	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
3353
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003354static int smb3_fiemap(struct cifs_tcon *tcon,
3355 struct cifsFileInfo *cfile,
3356 struct fiemap_extent_info *fei, u64 start, u64 len)
3357{
3358 unsigned int xid;
3359 struct file_allocated_range_buffer in_data, *out_data;
3360 u32 out_data_len;
3361 int i, num, rc, flags, last_blob;
3362 u64 next;
3363
3364 if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
3365 return -EBADR;
3366
3367 xid = get_xid();
3368 again:
3369 in_data.file_offset = cpu_to_le64(start);
3370 in_data.length = cpu_to_le64(len);
3371
3372 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3373 cfile->fid.volatile_fid,
3374 FSCTL_QUERY_ALLOCATED_RANGES, true,
3375 (char *)&in_data, sizeof(in_data),
3376 1024 * sizeof(struct file_allocated_range_buffer),
3377 (char **)&out_data, &out_data_len);
3378 if (rc == -E2BIG) {
3379 last_blob = 0;
3380 rc = 0;
3381 } else
3382 last_blob = 1;
3383 if (rc)
3384 goto out;
3385
3386 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3387 rc = -EINVAL;
3388 goto out;
3389 }
3390 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3391 rc = -EINVAL;
3392 goto out;
3393 }
3394
3395 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3396 for (i = 0; i < num; i++) {
3397 flags = 0;
3398 if (i == num - 1 && last_blob)
3399 flags |= FIEMAP_EXTENT_LAST;
3400
3401 rc = fiemap_fill_next_extent(fei,
3402 le64_to_cpu(out_data[i].file_offset),
3403 le64_to_cpu(out_data[i].file_offset),
3404 le64_to_cpu(out_data[i].length),
3405 flags);
3406 if (rc < 0)
3407 goto out;
3408 if (rc == 1) {
3409 rc = 0;
3410 goto out;
3411 }
3412 }
3413
3414 if (!last_blob) {
3415 next = le64_to_cpu(out_data[num - 1].file_offset) +
3416 le64_to_cpu(out_data[num - 1].length);
3417 len = len - (next - start);
3418 start = next;
3419 goto again;
3420 }
3421
3422 out:
3423 free_xid(xid);
3424 kfree(out_data);
3425 return rc;
3426}
Steve French9ccf3212014-10-18 17:01:15 -05003427
Steve French31742c52014-08-17 08:38:47 -05003428static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3429 loff_t off, loff_t len)
3430{
3431 /* KEEP_SIZE already checked for by do_fallocate */
3432 if (mode & FALLOC_FL_PUNCH_HOLE)
3433 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003434 else if (mode & FALLOC_FL_ZERO_RANGE) {
3435 if (mode & FALLOC_FL_KEEP_SIZE)
3436 return smb3_zero_range(file, tcon, off, len, true);
3437 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003438 } else if (mode == FALLOC_FL_KEEP_SIZE)
3439 return smb3_simple_falloc(file, tcon, off, len, true);
3440 else if (mode == 0)
3441 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003442
3443 return -EOPNOTSUPP;
3444}
3445
/*
 * Apply a server-initiated oplock downgrade for SMB2.0: simply install
 * the new level (epoch/purge_cache are unused by the 2.0 dialect, so 0
 * and NULL are passed through).
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
3453
/* forward declaration - defined after the SMB2.0 variant below */
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache);

/*
 * Apply a server-initiated lease downgrade for SMB3: only accept the new
 * state when its epoch is newer than what we have, then decide whether
 * cached pages must be purged.
 */
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_state = cinode->oplock;
	unsigned int old_epoch = cinode->epoch;
	unsigned int new_state;

	/* stale epoch means this break is older than our current state */
	if (epoch > old_epoch) {
		smb21_set_oplock_level(cinode, oplock, 0, NULL);
		cinode->epoch = epoch;
	}

	new_state = cinode->oplock;
	*purge_cache = false;

	/* purge when read caching is lost ... */
	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
	    (new_state & CIFS_CACHE_READ_FLG) == 0)
		*purge_cache = true;
	/* ... or when we missed an intermediate epoch (state changed twice) */
	else if (old_state == new_state && (epoch - old_epoch > 1))
		*purge_cache = true;
}
3481
3482static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003483smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3484 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003485{
3486 oplock &= 0xFF;
3487 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3488 return;
3489 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003490 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003491 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3492 &cinode->vfs_inode);
3493 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003494 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003495 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3496 &cinode->vfs_inode);
3497 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3498 cinode->oplock = CIFS_CACHE_READ_FLG;
3499 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3500 &cinode->vfs_inode);
3501 } else
3502 cinode->oplock = 0;
3503}
3504
3505static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003506smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3507 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003508{
3509 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003510 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003511
3512 oplock &= 0xFF;
3513 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3514 return;
3515
Pavel Shilovskya016e272019-09-26 12:31:20 -07003516 /* Check if the server granted an oplock rather than a lease */
3517 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3518 return smb2_set_oplock_level(cinode, oplock, epoch,
3519 purge_cache);
3520
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003521 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003522 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003523 strcat(message, "R");
3524 }
3525 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003526 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003527 strcat(message, "H");
3528 }
3529 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003530 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003531 strcat(message, "W");
3532 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003533 if (!new_oplock)
3534 strncpy(message, "None", sizeof(message));
3535
3536 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003537 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3538 &cinode->vfs_inode);
3539}
3540
/*
 * SMB3 variant of set_oplock_level: install the new lease state via the
 * 2.1 helper, then use the epoch delta between old and new state to
 * decide whether cached data may be stale and must be purged.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	if (purge_cache) {
		*purge_cache = false;
		/*
		 * An epoch gap larger than the number of state transitions
		 * we can account for means a change was missed - purge.
		 */
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
3575
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003576static bool
3577smb2_is_read_op(__u32 oplock)
3578{
3579 return oplock == SMB2_OPLOCK_LEVEL_II;
3580}
3581
3582static bool
3583smb21_is_read_op(__u32 oplock)
3584{
3585 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3586 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3587}
3588
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003589static __le32
3590map_oplock_to_lease(u8 oplock)
3591{
3592 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3593 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3594 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3595 return SMB2_LEASE_READ_CACHING;
3596 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3597 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3598 SMB2_LEASE_WRITE_CACHING;
3599 return 0;
3600}
3601
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003602static char *
3603smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3604{
3605 struct create_lease *buf;
3606
3607 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3608 if (!buf)
3609 return NULL;
3610
Stefano Brivio729c0c92018-07-05 15:10:02 +02003611 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003612 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003613
3614 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3615 (struct create_lease, lcontext));
3616 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3617 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3618 (struct create_lease, Name));
3619 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003620 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003621 buf->Name[0] = 'R';
3622 buf->Name[1] = 'q';
3623 buf->Name[2] = 'L';
3624 buf->Name[3] = 's';
3625 return (char *)buf;
3626}
3627
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003628static char *
3629smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3630{
3631 struct create_lease_v2 *buf;
3632
3633 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3634 if (!buf)
3635 return NULL;
3636
Stefano Brivio729c0c92018-07-05 15:10:02 +02003637 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003638 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3639
3640 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3641 (struct create_lease_v2, lcontext));
3642 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3643 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3644 (struct create_lease_v2, Name));
3645 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07003646 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003647 buf->Name[0] = 'R';
3648 buf->Name[1] = 'q';
3649 buf->Name[2] = 'L';
3650 buf->Name[3] = 's';
3651 return (char *)buf;
3652}
3653
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003654static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003655smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003656{
3657 struct create_lease *lc = (struct create_lease *)buf;
3658
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003659 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04003660 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3661 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3662 return le32_to_cpu(lc->lcontext.LeaseState);
3663}
3664
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003665static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003666smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003667{
3668 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3669
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003670 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003671 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3672 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06003673 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02003674 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003675 return le32_to_cpu(lc->lcontext.LeaseState);
3676}
3677
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04003678static unsigned int
3679smb2_wp_retry_size(struct inode *inode)
3680{
3681 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3682 SMB2_MAX_BUFFER_SIZE);
3683}
3684
Pavel Shilovsky52755802014-08-18 20:49:57 +04003685static bool
3686smb2_dir_needs_close(struct cifsFileInfo *cfile)
3687{
3688 return !cfile->invalidHandle;
3689}
3690
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003691static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10003692fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05003693 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003694{
3695 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003696 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003697
3698 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3699 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3700 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3701 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French2b2f7542019-06-07 15:16:10 -05003702 if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3703 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3704 else
3705 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003706 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003707}
3708
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	struct page *page;

	/*
	 * VMAP_STACK (at least) puts stack into the vmalloc address space,
	 * where virt_to_page() must not be used.
	 */
	if (is_vmalloc_addr(buf))
		page = vmalloc_to_page(buf);
	else
		page = virt_to_page(buf);
	sg_set_page(sg, page, buflen, offset_in_page(buf));
}
3725
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0] is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds a scatterlist covering all iovs and pages of @num_rqst requests
 * plus one trailing entry for the @sign buffer.  Returns NULL on
 * allocation failure; caller frees the returned array with kfree().
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and per page, plus one for the signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* final entry receives/carries the signature */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
3773
/*
 * Look up the SMB3 encryption (@enc != 0) or decryption key for session
 * @ses_id and copy it into @key (SMB3_SIGN_KEY_SIZE bytes).
 *
 * Returns 0 when found, 1 otherwise.
 *
 * NOTE(review): the @server parameter is deliberately reused as the list
 * iterator - the search spans ALL connected servers, presumably because
 * with multichannel the session may live on a different channel than the
 * one this message arrived on; confirm against the multichannel design.
 */
static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{
	struct cifs_ses *ses;
	u8 *ses_enc_key;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			if (ses->Suid == ses_id) {
				ses_enc_key = enc ? ses->smb3encryptionkey :
					ses->smb3decryptionkey;
				memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
				spin_unlock(&cifs_tcp_ses_lock);
				return 0;
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	return 1;
}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003796/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10003797 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3798 * iov[0] - transform header (associate data),
3799 * iov[1-N] - SMB2 header and pages - data to encrypt.
3800 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003801 * untouched.
3802 */
3803static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003804crypt_message(struct TCP_Server_Info *server, int num_rqst,
3805 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003806{
3807 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003808 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10003809 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003810 int rc = 0;
3811 struct scatterlist *sg;
3812 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003813 u8 key[SMB3_SIGN_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003814 struct aead_request *req;
3815 char *iv;
3816 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003817 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003818 struct crypto_aead *tfm;
3819 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
3820
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003821 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
3822 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003823 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003824 enc ? "en" : "de");
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003825 return 0;
3826 }
3827
3828 rc = smb3_crypto_aead_allocate(server);
3829 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003830 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003831 return rc;
3832 }
3833
3834 tfm = enc ? server->secmech.ccmaesencrypt :
3835 server->secmech.ccmaesdecrypt;
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08003836 rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003837 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003838 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003839 return rc;
3840 }
3841
3842 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
3843 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003844 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003845 return rc;
3846 }
3847
3848 req = aead_request_alloc(tfm, GFP_KERNEL);
3849 if (!req) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003850 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003851 return -ENOMEM;
3852 }
3853
3854 if (!enc) {
3855 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
3856 crypt_len += SMB2_SIGNATURE_SIZE;
3857 }
3858
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003859 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003860 if (!sg) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003861 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003862 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003863 goto free_req;
3864 }
3865
3866 iv_len = crypto_aead_ivsize(tfm);
3867 iv = kzalloc(iv_len, GFP_KERNEL);
3868 if (!iv) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10003869 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02003870 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003871 goto free_sg;
3872 }
Steve French2b2f7542019-06-07 15:16:10 -05003873
3874 if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3875 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3876 else {
3877 iv[0] = 3;
3878 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
3879 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003880
3881 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
3882 aead_request_set_ad(req, assoc_data_len);
3883
3884 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003885 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003886
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01003887 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
3888 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003889
3890 if (!rc && enc)
3891 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
3892
3893 kfree(iv);
3894free_sg:
3895 kfree(sg);
3896free_req:
3897 kfree(req);
3898 return rc;
3899}
3900
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003901void
3902smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003903{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003904 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003905
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003906 for (i = 0; i < num_rqst; i++) {
3907 if (rqst[i].rq_pages) {
3908 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3909 put_page(rqst[i].rq_pages[j]);
3910 kfree(rqst[i].rq_pages);
3911 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003912 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003913}
3914
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003915/*
3916 * This function will initialize new_rq and encrypt the content.
3917 * The first entry, new_rq[0], only contains a single iov which contains
3918 * a smb2_transform_hdr and is pre-allocated by the caller.
3919 * This function then populates new_rq[1+] with the content from olq_rq[0+].
3920 *
3921 * The end result is an array of smb_rqst structures where the first structure
3922 * only contains a single iov for the transform header which we then can pass
3923 * to crypt_message().
3924 *
3925 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3926 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3927 */
3928static int
3929smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3930 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003931{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003932 struct page **pages;
3933 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3934 unsigned int npages;
3935 unsigned int orig_len = 0;
3936 int i, j;
3937 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003938
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003939 for (i = 1; i < num_rqst; i++) {
3940 npages = old_rq[i - 1].rq_npages;
3941 pages = kmalloc_array(npages, sizeof(struct page *),
3942 GFP_KERNEL);
3943 if (!pages)
3944 goto err_free;
3945
3946 new_rq[i].rq_pages = pages;
3947 new_rq[i].rq_npages = npages;
3948 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3949 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3950 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3951 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3952 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3953
3954 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3955
3956 for (j = 0; j < npages; j++) {
3957 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3958 if (!pages[j])
3959 goto err_free;
3960 }
3961
3962 /* copy pages form the old */
3963 for (j = 0; j < npages; j++) {
3964 char *dst, *src;
3965 unsigned int offset, len;
3966
3967 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3968
3969 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3970 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3971
3972 memcpy(dst, src, len);
3973 kunmap(new_rq[i].rq_pages[j]);
3974 kunmap(old_rq[i - 1].rq_pages[j]);
3975 }
3976 }
3977
3978 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05003979 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003980
3981 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02003982 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10003983 if (rc)
3984 goto err_free;
3985
3986 return rc;
3987
3988err_free:
3989 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3990 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07003991}
3992
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08003993static int
3994smb3_is_transform_hdr(void *buf)
3995{
3996 struct smb2_transform_hdr *trhdr = buf;
3997
3998 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3999}
4000
4001static int
4002decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4003 unsigned int buf_data_size, struct page **pages,
4004 unsigned int npages, unsigned int page_data_size)
4005{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004006 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004007 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004008 int rc;
4009
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004010 iov[0].iov_base = buf;
4011 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4012 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4013 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004014
4015 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004016 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004017 rqst.rq_pages = pages;
4018 rqst.rq_npages = npages;
4019 rqst.rq_pagesz = PAGE_SIZE;
4020 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
4021
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004022 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02004023 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004024
4025 if (rc)
4026 return rc;
4027
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004028 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004029
4030 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004031
4032 return rc;
4033}
4034
4035static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004036read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
4037 unsigned int npages, unsigned int len)
4038{
4039 int i;
4040 int length;
4041
4042 for (i = 0; i < npages; i++) {
4043 struct page *page = pages[i];
4044 size_t n;
4045
4046 n = len;
4047 if (len >= PAGE_SIZE) {
4048 /* enough data to fill the page */
4049 n = PAGE_SIZE;
4050 len -= n;
4051 } else {
4052 zero_user(page, len, PAGE_SIZE - len);
4053 len = 0;
4054 }
Long Li1dbe3462018-05-30 12:47:55 -07004055 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004056 if (length < 0)
4057 return length;
4058 server->total_read += length;
4059 }
4060
4061 return 0;
4062}
4063
4064static int
4065init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
4066 unsigned int cur_off, struct bio_vec **page_vec)
4067{
4068 struct bio_vec *bvec;
4069 int i;
4070
4071 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
4072 if (!bvec)
4073 return -ENOMEM;
4074
4075 for (i = 0; i < npages; i++) {
4076 bvec[i].bv_page = pages[i];
4077 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
4078 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
4079 data_size -= bvec[i].bv_len;
4080 }
4081
4082 if (data_size != 0) {
4083 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4084 kfree(bvec);
4085 return -EIO;
4086 }
4087
4088 *page_vec = bvec;
4089 return 0;
4090}
4091
/*
 * Process a (possibly decrypted) SMB2 READ response for @mid.
 *
 * The response header is in @buf (@buf_len bytes); for large reads the
 * payload instead lives in @pages (@npages pages, @page_data_size bytes).
 * The payload may be in @buf or in @pages, never split across both.
 * On a valid response the data is handed to rdata->copy_into_pages() and
 * the mid is dequeued.  Protocol-level read errors are recorded in
 * rdata->result and reported as a return value of 0; -1 is returned for
 * session-expired/status-pending frames that must not complete the mid.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server);
		wake_up(&server->response_q);
		return -1;
	}

	if (server->ops->is_status_pending &&
			server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* bytes of padding between the response header and the payload */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		dequeue_mid(mid, rdata->result);
		return 0;
	}

	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	dequeue_mid(mid, false);
	return length;
}
4225
Steve French35cf94a2019-09-07 01:09:49 -05004226struct smb2_decrypt_work {
4227 struct work_struct decrypt;
4228 struct TCP_Server_Info *server;
4229 struct page **ppages;
4230 char *buf;
4231 unsigned int npages;
4232 unsigned int len;
4233};
4234
4235
/*
 * Workqueue handler that decrypts a large read response off the demultiplex
 * thread.  Decrypts the buffer and pages in the smb2_decrypt_work context,
 * locates the matching mid, completes it (callback + reference release),
 * then frees the pages, the response buffer and the work context.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	/* record last response time so the server is not treated as dead */
	dw->server->lstrp = jiffies;
	mid = smb2_find_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len);
		/* complete the request and drop the find_mid reference */
		mid->callback(mid);
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4271
4272
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004273static int
Steve French35cf94a2019-09-07 01:09:49 -05004274receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
4275 int *num_mids)
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004276{
4277 char *buf = server->smallbuf;
4278 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4279 unsigned int npages;
4280 struct page **pages;
4281 unsigned int len;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004282 unsigned int buflen = server->pdu_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004283 int rc;
4284 int i = 0;
Steve French35cf94a2019-09-07 01:09:49 -05004285 struct smb2_decrypt_work *dw;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004286
Steve French35cf94a2019-09-07 01:09:49 -05004287 *num_mids = 1;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004288 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004289 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4290
4291 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4292 if (rc < 0)
4293 return rc;
4294 server->total_read += rc;
4295
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004296 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
Ronnie Sahlberg93012bf2018-03-31 11:45:31 +11004297 server->vals->read_rsp_size;
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004298 npages = DIV_ROUND_UP(len, PAGE_SIZE);
4299
4300 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
4301 if (!pages) {
4302 rc = -ENOMEM;
4303 goto discard_data;
4304 }
4305
4306 for (; i < npages; i++) {
4307 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4308 if (!pages[i]) {
4309 rc = -ENOMEM;
4310 goto discard_data;
4311 }
4312 }
4313
4314 /* read read data into pages */
4315 rc = read_data_into_pages(server, pages, npages, len);
4316 if (rc)
4317 goto free_pages;
4318
Pavel Shilovsky350be252017-04-10 10:31:33 -07004319 rc = cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004320 if (rc)
4321 goto free_pages;
4322
Steve French35cf94a2019-09-07 01:09:49 -05004323 /*
4324 * For large reads, offload to different thread for better performance,
4325 * use more cores decrypting which can be expensive
4326 */
4327
Steve French10328c42019-09-09 13:30:15 -05004328 if ((server->min_offload) && (server->in_flight > 1) &&
Steve French563317e2019-09-08 23:22:02 -05004329 (server->pdu_size >= server->min_offload)) {
Steve French35cf94a2019-09-07 01:09:49 -05004330 dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
4331 if (dw == NULL)
4332 goto non_offloaded_decrypt;
4333
4334 dw->buf = server->smallbuf;
4335 server->smallbuf = (char *)cifs_small_buf_get();
4336
4337 INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
4338
4339 dw->npages = npages;
4340 dw->server = server;
4341 dw->ppages = pages;
4342 dw->len = len;
Steve Frencha08d8972019-10-26 16:00:44 -05004343 queue_work(decrypt_wq, &dw->decrypt);
Steve French35cf94a2019-09-07 01:09:49 -05004344 *num_mids = 0; /* worker thread takes care of finding mid */
4345 return -1;
4346 }
4347
4348non_offloaded_decrypt:
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004349 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004350 pages, npages, len);
4351 if (rc)
4352 goto free_pages;
4353
4354 *mid = smb2_find_mid(server, buf);
4355 if (*mid == NULL)
4356 cifs_dbg(FYI, "mid not found\n");
4357 else {
4358 cifs_dbg(FYI, "mid found\n");
4359 (*mid)->decrypted = true;
4360 rc = handle_read_data(server, *mid, buf,
4361 server->vals->read_rsp_size,
4362 pages, npages, len);
4363 }
4364
4365free_pages:
4366 for (i = i - 1; i >= 0; i--)
4367 put_page(pages[i]);
4368 kfree(pages);
4369 return rc;
4370discard_data:
Pavel Shilovsky350be252017-04-10 10:31:33 -07004371 cifs_discard_remaining_data(server);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004372 goto free_pages;
4373}
4374
/*
 * Receive and decrypt a standard (non-large-read) encrypted PDU, which may
 * be a compound chain.  Each decrypted PDU in the chain is matched to a mid
 * and appended to @mids/@bufs; *num_mids is the number of PDUs handled.
 * Chained PDUs after the first are copied into freshly allocated buffers
 * which become the new server->smallbuf/bigbuf as the chain is walked.
 */
static int
receive_encrypted_standard(struct TCP_Server_Info *server,
			   struct mid_q_entry **mids, char **bufs,
			   int *num_mids)
{
	int ret, length;
	char *buf = server->smallbuf;
	struct smb2_sync_hdr *shdr;
	unsigned int pdu_length = server->pdu_size;
	unsigned int buf_size;
	struct mid_q_entry *mid_entry;
	int next_is_large;
	char *next_buffer = NULL;

	*num_mids = 0;

	/* switch to large buffer if too big for a small one */
	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
		server->large_buf = true;
		memcpy(server->bigbuf, buf, server->total_read);
		buf = server->bigbuf;
	}

	/* now read the rest */
	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
				pdu_length - HEADER_SIZE(server) + 1);
	if (length < 0)
		return length;
	server->total_read += length;

	buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
	length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
	if (length)
		return length;

	next_is_large = server->large_buf;
one_more:
	shdr = (struct smb2_sync_hdr *)buf;
	if (shdr->NextCommand) {
		/* copy the rest of the chain out before this PDU is handled */
		if (next_is_large)
			next_buffer = (char *)cifs_buf_get();
		else
			next_buffer = (char *)cifs_small_buf_get();
		memcpy(next_buffer,
		       buf + le32_to_cpu(shdr->NextCommand),
		       pdu_length - le32_to_cpu(shdr->NextCommand));
	}

	mid_entry = smb2_find_mid(server, buf);
	if (mid_entry == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		mid_entry->decrypted = true;
		mid_entry->resp_buf_size = server->pdu_size;
	}

	if (*num_mids >= MAX_COMPOUND) {
		cifs_server_dbg(VFS, "too many PDUs in compound\n");
		return -1;
	}
	bufs[*num_mids] = buf;
	mids[(*num_mids)++] = mid_entry;

	if (mid_entry && mid_entry->handle)
		ret = mid_entry->handle(server, mid_entry);
	else
		ret = cifs_handle_standard(server, mid_entry);

	if (ret == 0 && shdr->NextCommand) {
		/* advance to the next PDU in the chain */
		pdu_length -= le32_to_cpu(shdr->NextCommand);
		server->large_buf = next_is_large;
		if (next_is_large)
			server->bigbuf = buf = next_buffer;
		else
			server->smallbuf = buf = next_buffer;
		goto one_more;
	} else if (ret != 0) {
		/*
		 * ret != 0 here means that we didn't get to handle_mid() thus
		 * server->smallbuf and server->bigbuf are still valid. We need
		 * to free next_buffer because it is not going to be used
		 * anywhere.
		 */
		if (next_is_large)
			free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
		else
			free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
	}

	return ret;
}
4467
4468static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004469smb3_receive_transform(struct TCP_Server_Info *server,
4470 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004471{
4472 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004473 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004474 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4475 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4476
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004477 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004478 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004479 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004480 pdu_length);
4481 cifs_reconnect(server);
4482 wake_up(&server->response_q);
4483 return -ECONNABORTED;
4484 }
4485
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004486 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004487 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004488 cifs_reconnect(server);
4489 wake_up(&server->response_q);
4490 return -ECONNABORTED;
4491 }
4492
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004493 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004494 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004495 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004496 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004497
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004498 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004499}
4500
4501int
4502smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4503{
4504 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4505
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004506 return handle_read_data(server, mid, buf, server->pdu_size,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004507 NULL, 0, 0);
4508}
4509
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004510static int
4511smb2_next_header(char *buf)
4512{
4513 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4514 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4515
4516 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4517 return sizeof(struct smb2_transform_hdr) +
4518 le32_to_cpu(t_hdr->OriginalMessageSize);
4519
4520 return le32_to_cpu(hdr->NextCommand);
4521}
4522
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004523static int
4524smb2_make_node(unsigned int xid, struct inode *inode,
4525 struct dentry *dentry, struct cifs_tcon *tcon,
4526 char *full_path, umode_t mode, dev_t dev)
4527{
4528 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4529 int rc = -EPERM;
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004530 FILE_ALL_INFO *buf = NULL;
4531 struct cifs_io_parms io_parms;
4532 __u32 oplock = 0;
4533 struct cifs_fid fid;
4534 struct cifs_open_parms oparms;
4535 unsigned int bytes_written;
4536 struct win_dev *pdev;
4537 struct kvec iov[2];
4538
4539 /*
4540 * Check if mounted with mount parm 'sfu' mount parm.
4541 * SFU emulation should work with all servers, but only
4542 * supports block and char device (no socket & fifo),
4543 * and was used by default in earlier versions of Windows
4544 */
4545 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
4546 goto out;
4547
4548 /*
4549 * TODO: Add ability to create instead via reparse point. Windows (e.g.
4550 * their current NFS server) uses this approach to expose special files
4551 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
4552 */
4553
4554 if (!S_ISCHR(mode) && !S_ISBLK(mode))
4555 goto out;
4556
4557 cifs_dbg(FYI, "sfu compat create special file\n");
4558
4559 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
4560 if (buf == NULL) {
4561 rc = -ENOMEM;
4562 goto out;
4563 }
4564
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004565 oparms.tcon = tcon;
4566 oparms.cifs_sb = cifs_sb;
4567 oparms.desired_access = GENERIC_WRITE;
Amir Goldstein0f060932020-02-03 21:46:43 +02004568 oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
4569 CREATE_OPTION_SPECIAL);
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004570 oparms.disposition = FILE_CREATE;
4571 oparms.path = full_path;
4572 oparms.fid = &fid;
4573 oparms.reconnect = false;
4574
4575 if (tcon->ses->server->oplocks)
4576 oplock = REQ_OPLOCK;
4577 else
4578 oplock = 0;
4579 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
4580 if (rc)
4581 goto out;
4582
4583 /*
4584 * BB Do not bother to decode buf since no local inode yet to put
4585 * timestamps in, but we can reuse it safely.
4586 */
4587
4588 pdev = (struct win_dev *)buf;
4589 io_parms.pid = current->tgid;
4590 io_parms.tcon = tcon;
4591 io_parms.offset = 0;
4592 io_parms.length = sizeof(struct win_dev);
4593 iov[1].iov_base = buf;
4594 iov[1].iov_len = sizeof(struct win_dev);
4595 if (S_ISCHR(mode)) {
4596 memcpy(pdev->type, "IntxCHR", 8);
4597 pdev->major = cpu_to_le64(MAJOR(dev));
4598 pdev->minor = cpu_to_le64(MINOR(dev));
4599 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4600 &bytes_written, iov, 1);
4601 } else if (S_ISBLK(mode)) {
4602 memcpy(pdev->type, "IntxBLK", 8);
4603 pdev->major = cpu_to_le64(MAJOR(dev));
4604 pdev->minor = cpu_to_le64(MINOR(dev));
4605 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
4606 &bytes_written, iov, 1);
4607 }
4608 tcon->ses->server->ops->close(xid, tcon, &fid);
4609 d_drop(dentry);
4610
4611 /* FIXME: add code here to set EAs */
4612out:
4613 kfree(buf);
4614 return rc;
4615}
4616
4617
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004618struct smb_version_operations smb20_operations = {
4619 .compare_fids = smb2_compare_fids,
4620 .setup_request = smb2_setup_request,
4621 .setup_async_request = smb2_setup_async_request,
4622 .check_receive = smb2_check_receive,
4623 .add_credits = smb2_add_credits,
4624 .set_credits = smb2_set_credits,
4625 .get_credits_field = smb2_get_credits_field,
4626 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004627 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004628 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004629 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004630 .read_data_offset = smb2_read_data_offset,
4631 .read_data_length = smb2_read_data_length,
4632 .map_error = map_smb2_to_linux_error,
4633 .find_mid = smb2_find_mid,
4634 .check_message = smb2_check_message,
4635 .dump_detail = smb2_dump_detail,
4636 .clear_stats = smb2_clear_stats,
4637 .print_stats = smb2_print_stats,
4638 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004639 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004640 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004641 .need_neg = smb2_need_neg,
4642 .negotiate = smb2_negotiate,
4643 .negotiate_wsize = smb2_negotiate_wsize,
4644 .negotiate_rsize = smb2_negotiate_rsize,
4645 .sess_setup = SMB2_sess_setup,
4646 .logoff = SMB2_logoff,
4647 .tree_connect = SMB2_tcon,
4648 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004649 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004650 .is_path_accessible = smb2_is_path_accessible,
4651 .can_echo = smb2_can_echo,
4652 .echo = SMB2_echo,
4653 .query_path_info = smb2_query_path_info,
4654 .get_srv_inum = smb2_get_srv_inum,
4655 .query_file_info = smb2_query_file_info,
4656 .set_path_size = smb2_set_path_size,
4657 .set_file_size = smb2_set_file_size,
4658 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004659 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004660 .mkdir = smb2_mkdir,
4661 .mkdir_setinfo = smb2_mkdir_setinfo,
4662 .rmdir = smb2_rmdir,
4663 .unlink = smb2_unlink,
4664 .rename = smb2_rename_path,
4665 .create_hardlink = smb2_create_hardlink,
4666 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01004667 .query_mf_symlink = smb3_query_mf_symlink,
4668 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004669 .open = smb2_open_file,
4670 .set_fid = smb2_set_fid,
4671 .close = smb2_close_file,
4672 .flush = smb2_flush_file,
4673 .async_readv = smb2_async_readv,
4674 .async_writev = smb2_async_writev,
4675 .sync_read = smb2_sync_read,
4676 .sync_write = smb2_sync_write,
4677 .query_dir_first = smb2_query_dir_first,
4678 .query_dir_next = smb2_query_dir_next,
4679 .close_dir = smb2_close_dir,
4680 .calc_smb_size = smb2_calc_size,
4681 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004682 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004683 .oplock_response = smb2_oplock_response,
4684 .queryfs = smb2_queryfs,
4685 .mand_lock = smb2_mand_lock,
4686 .mand_unlock_range = smb2_unlock_range,
4687 .push_mand_locks = smb2_push_mandatory_locks,
4688 .get_lease_key = smb2_get_lease_key,
4689 .set_lease_key = smb2_set_lease_key,
4690 .new_lease_key = smb2_new_lease_key,
4691 .calc_signature = smb2_calc_signature,
4692 .is_read_op = smb2_is_read_op,
4693 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004694 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004695 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004696 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004697 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004698 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004699 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304700 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004701#ifdef CONFIG_CIFS_XATTR
4702 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004703 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004704#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004705 .get_acl = get_smb2_acl,
4706 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004707 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004708 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004709 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004710 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004711 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004712 .llseek = smb3_llseek,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004713};
4714
Steve French1080ef72011-02-24 18:07:19 +00004715struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07004716 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004717 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04004718 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004719 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04004720 .add_credits = smb2_add_credits,
4721 .set_credits = smb2_set_credits,
4722 .get_credits_field = smb2_get_credits_field,
4723 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004724 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004725 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04004726 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004727 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004728 .read_data_offset = smb2_read_data_offset,
4729 .read_data_length = smb2_read_data_length,
4730 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04004731 .find_mid = smb2_find_mid,
4732 .check_message = smb2_check_message,
4733 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04004734 .clear_stats = smb2_clear_stats,
4735 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004736 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004737 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004738 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04004739 .need_neg = smb2_need_neg,
4740 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07004741 .negotiate_wsize = smb2_negotiate_wsize,
4742 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04004743 .sess_setup = SMB2_sess_setup,
4744 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04004745 .tree_connect = SMB2_tcon,
4746 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05004747 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04004748 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04004749 .can_echo = smb2_can_echo,
4750 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04004751 .query_path_info = smb2_query_path_info,
4752 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07004753 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07004754 .set_path_size = smb2_set_path_size,
4755 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07004756 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004757 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04004758 .mkdir = smb2_mkdir,
4759 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04004760 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07004761 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07004762 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07004763 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004764 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004765 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004766 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07004767 .open = smb2_open_file,
4768 .set_fid = smb2_set_fid,
4769 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07004770 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07004771 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07004772 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07004773 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07004774 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07004775 .query_dir_first = smb2_query_dir_first,
4776 .query_dir_next = smb2_query_dir_next,
4777 .close_dir = smb2_close_dir,
4778 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07004779 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004780 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07004781 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07004782 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07004783 .mand_lock = smb2_mand_lock,
4784 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07004785 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07004786 .get_lease_key = smb2_get_lease_key,
4787 .set_lease_key = smb2_set_lease_key,
4788 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06004789 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004790 .is_read_op = smb21_is_read_op,
4791 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004792 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004793 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004794 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004795 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004796 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05004797 .enum_snapshots = smb3_enum_snapshots,
Steve French2c6251a2020-02-12 22:37:08 -06004798 .notify = smb3_notify,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004799 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304800 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004801#ifdef CONFIG_CIFS_XATTR
4802 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004803 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004804#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004805 .get_acl = get_smb2_acl,
4806 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004807 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004808 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004809 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004810 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004811 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004812 .llseek = smb3_llseek,
Steve French38107d42012-12-08 22:08:06 -06004813};
4814
Steve French38107d42012-12-08 22:08:06 -06004815struct smb_version_operations smb30_operations = {
4816 .compare_fids = smb2_compare_fids,
4817 .setup_request = smb2_setup_request,
4818 .setup_async_request = smb2_setup_async_request,
4819 .check_receive = smb2_check_receive,
4820 .add_credits = smb2_add_credits,
4821 .set_credits = smb2_set_credits,
4822 .get_credits_field = smb2_get_credits_field,
4823 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04004824 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004825 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06004826 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004827 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06004828 .read_data_offset = smb2_read_data_offset,
4829 .read_data_length = smb2_read_data_length,
4830 .map_error = map_smb2_to_linux_error,
4831 .find_mid = smb2_find_mid,
4832 .check_message = smb2_check_message,
4833 .dump_detail = smb2_dump_detail,
4834 .clear_stats = smb2_clear_stats,
4835 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05004836 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06004837 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004838 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004839 .downgrade_oplock = smb3_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06004840 .need_neg = smb2_need_neg,
4841 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004842 .negotiate_wsize = smb3_negotiate_wsize,
4843 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06004844 .sess_setup = SMB2_sess_setup,
4845 .logoff = SMB2_logoff,
4846 .tree_connect = SMB2_tcon,
4847 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05004848 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06004849 .is_path_accessible = smb2_is_path_accessible,
4850 .can_echo = smb2_can_echo,
4851 .echo = SMB2_echo,
4852 .query_path_info = smb2_query_path_info,
4853 .get_srv_inum = smb2_get_srv_inum,
4854 .query_file_info = smb2_query_file_info,
4855 .set_path_size = smb2_set_path_size,
4856 .set_file_size = smb2_set_file_size,
4857 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05004858 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06004859 .mkdir = smb2_mkdir,
4860 .mkdir_setinfo = smb2_mkdir_setinfo,
4861 .rmdir = smb2_rmdir,
4862 .unlink = smb2_unlink,
4863 .rename = smb2_rename_path,
4864 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04004865 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05004866 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05004867 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06004868 .open = smb2_open_file,
4869 .set_fid = smb2_set_fid,
4870 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06004871 .close_getattr = smb2_close_getattr,
Steve French38107d42012-12-08 22:08:06 -06004872 .flush = smb2_flush_file,
4873 .async_readv = smb2_async_readv,
4874 .async_writev = smb2_async_writev,
4875 .sync_read = smb2_sync_read,
4876 .sync_write = smb2_sync_write,
4877 .query_dir_first = smb2_query_dir_first,
4878 .query_dir_next = smb2_query_dir_next,
4879 .close_dir = smb2_close_dir,
4880 .calc_smb_size = smb2_calc_size,
4881 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004882 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06004883 .oplock_response = smb2_oplock_response,
4884 .queryfs = smb2_queryfs,
4885 .mand_lock = smb2_mand_lock,
4886 .mand_unlock_range = smb2_unlock_range,
4887 .push_mand_locks = smb2_push_mandatory_locks,
4888 .get_lease_key = smb2_get_lease_key,
4889 .set_lease_key = smb2_set_lease_key,
4890 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06004891 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06004892 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05004893 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004894 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004895 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004896 .create_lease_buf = smb3_create_lease_buf,
4897 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05004898 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05004899 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06004900 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004901 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04004902 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05004903 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05004904 .enum_snapshots = smb3_enum_snapshots,
Steve Frenchd26c2dd2020-02-06 06:00:14 -06004905 .notify = smb3_notify,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004906 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004907 .is_transform_hdr = smb3_is_transform_hdr,
4908 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01004909 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05304910 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004911#ifdef CONFIG_CIFS_XATTR
4912 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10004913 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10004914#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05004915 .get_acl = get_smb2_acl,
4916 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05004917 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004918 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05004919 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05004920 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004921 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10004922 .llseek = smb3_llseek,
Steve French1080ef72011-02-24 18:07:19 +00004923};
4924
Steve Frenchaab18932015-06-23 23:37:11 -05004925struct smb_version_operations smb311_operations = {
4926 .compare_fids = smb2_compare_fids,
4927 .setup_request = smb2_setup_request,
4928 .setup_async_request = smb2_setup_async_request,
4929 .check_receive = smb2_check_receive,
4930 .add_credits = smb2_add_credits,
4931 .set_credits = smb2_set_credits,
4932 .get_credits_field = smb2_get_credits_field,
4933 .get_credits = smb2_get_credits,
4934 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004935 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05004936 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08004937 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05004938 .read_data_offset = smb2_read_data_offset,
4939 .read_data_length = smb2_read_data_length,
4940 .map_error = map_smb2_to_linux_error,
4941 .find_mid = smb2_find_mid,
4942 .check_message = smb2_check_message,
4943 .dump_detail = smb2_dump_detail,
4944 .clear_stats = smb2_clear_stats,
4945 .print_stats = smb2_print_stats,
4946 .dump_share_caps = smb2_dump_share_caps,
4947 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08004948 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004949 .downgrade_oplock = smb3_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05004950 .need_neg = smb2_need_neg,
4951 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05004952 .negotiate_wsize = smb3_negotiate_wsize,
4953 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05004954 .sess_setup = SMB2_sess_setup,
4955 .logoff = SMB2_logoff,
4956 .tree_connect = SMB2_tcon,
4957 .tree_disconnect = SMB2_tdis,
4958 .qfs_tcon = smb3_qfs_tcon,
4959 .is_path_accessible = smb2_is_path_accessible,
4960 .can_echo = smb2_can_echo,
4961 .echo = SMB2_echo,
4962 .query_path_info = smb2_query_path_info,
4963 .get_srv_inum = smb2_get_srv_inum,
4964 .query_file_info = smb2_query_file_info,
4965 .set_path_size = smb2_set_path_size,
4966 .set_file_size = smb2_set_file_size,
4967 .set_file_info = smb2_set_file_info,
4968 .set_compression = smb2_set_compression,
4969 .mkdir = smb2_mkdir,
4970 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05004971 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05004972 .rmdir = smb2_rmdir,
4973 .unlink = smb2_unlink,
4974 .rename = smb2_rename_path,
4975 .create_hardlink = smb2_create_hardlink,
4976 .query_symlink = smb2_query_symlink,
4977 .query_mf_symlink = smb3_query_mf_symlink,
4978 .create_mf_symlink = smb3_create_mf_symlink,
4979 .open = smb2_open_file,
4980 .set_fid = smb2_set_fid,
4981 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06004982 .close_getattr = smb2_close_getattr,
Steve Frenchaab18932015-06-23 23:37:11 -05004983 .flush = smb2_flush_file,
4984 .async_readv = smb2_async_readv,
4985 .async_writev = smb2_async_writev,
4986 .sync_read = smb2_sync_read,
4987 .sync_write = smb2_sync_write,
4988 .query_dir_first = smb2_query_dir_first,
4989 .query_dir_next = smb2_query_dir_next,
4990 .close_dir = smb2_close_dir,
4991 .calc_smb_size = smb2_calc_size,
4992 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07004993 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05004994 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05004995 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05004996 .mand_lock = smb2_mand_lock,
4997 .mand_unlock_range = smb2_unlock_range,
4998 .push_mand_locks = smb2_push_mandatory_locks,
4999 .get_lease_key = smb2_get_lease_key,
5000 .set_lease_key = smb2_set_lease_key,
5001 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06005002 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05005003 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05005004 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05005005 .is_read_op = smb21_is_read_op,
5006 .set_oplock_level = smb3_set_oplock_level,
5007 .create_lease_buf = smb3_create_lease_buf,
5008 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005009 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07005010 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05005011/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
5012 .wp_retry_size = smb2_wp_retry_size,
5013 .dir_needs_close = smb2_dir_needs_close,
5014 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05005015 .enum_snapshots = smb3_enum_snapshots,
Steve Frenchd26c2dd2020-02-06 06:00:14 -06005016 .notify = smb3_notify,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07005017 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005018 .is_transform_hdr = smb3_is_transform_hdr,
5019 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005020 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305021 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005022#ifdef CONFIG_CIFS_XATTR
5023 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005024 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005025#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10005026 .get_acl = get_smb2_acl,
5027 .get_acl_by_fid = get_smb2_acl_by_fid,
5028 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005029 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005030 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005031 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005032 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005033 .llseek = smb3_llseek,
Steve Frenchaab18932015-06-23 23:37:11 -05005034};
Steve Frenchaab18932015-06-23 23:37:11 -05005035
Steve Frenchdd446b12012-11-28 23:21:06 -06005036struct smb_version_values smb20_values = {
5037 .version_string = SMB20_VERSION_STRING,
5038 .protocol_id = SMB20_PROT_ID,
5039 .req_capabilities = 0, /* MBZ */
5040 .large_lock_type = 0,
5041 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5042 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5043 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005044 .header_size = sizeof(struct smb2_sync_hdr),
5045 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06005046 .max_header_size = MAX_SMB2_HDR_SIZE,
5047 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5048 .lock_cmd = SMB2_LOCK,
5049 .cap_unix = 0,
5050 .cap_nt_find = SMB2_NT_FIND,
5051 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005052 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5053 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005054 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06005055};
5056
Steve French1080ef72011-02-24 18:07:19 +00005057struct smb_version_values smb21_values = {
5058 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05005059 .protocol_id = SMB21_PROT_ID,
5060 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
5061 .large_lock_type = 0,
5062 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5063 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5064 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005065 .header_size = sizeof(struct smb2_sync_hdr),
5066 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05005067 .max_header_size = MAX_SMB2_HDR_SIZE,
5068 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5069 .lock_cmd = SMB2_LOCK,
5070 .cap_unix = 0,
5071 .cap_nt_find = SMB2_NT_FIND,
5072 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005073 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5074 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005075 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05005076};
5077
Steve French9764c022017-09-17 10:41:35 -05005078struct smb_version_values smb3any_values = {
5079 .version_string = SMB3ANY_VERSION_STRING,
5080 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05005081 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05005082 .large_lock_type = 0,
5083 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5084 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5085 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005086 .header_size = sizeof(struct smb2_sync_hdr),
5087 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05005088 .max_header_size = MAX_SMB2_HDR_SIZE,
5089 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5090 .lock_cmd = SMB2_LOCK,
5091 .cap_unix = 0,
5092 .cap_nt_find = SMB2_NT_FIND,
5093 .cap_large_files = SMB2_LARGE_FILES,
5094 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5095 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5096 .create_lease_size = sizeof(struct create_lease_v2),
5097};
5098
/*
 * Protocol constants for the default mount (no vers= option): a protocol
 * array is sent, so protocol_id is a placeholder.  Field-for-field the
 * same as smb3any_values except for the version string.
 */
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
5119
/* Protocol constants for an explicit SMB 3.0 (vers=3.0) mount. */
struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0, /* SMB2+ has no RFC1001-style length preamble */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* NOTE(review): "- 1" presumably drops the one-byte variable-data
	 * placeholder at the end of smb2_read_rsp — confirm against smb2pdu.h */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* CIFS Unix extensions capability not advertised */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
};
Steve French20b6d8b2013-06-12 22:48:41 -05005140
/* Protocol constants for an explicit SMB 3.0.2 (vers=3.02) mount. */
struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0, /* SMB2+ has no RFC1001-style length preamble */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* NOTE(review): "- 1" presumably drops the one-byte variable-data
	 * placeholder at the end of smb2_read_rsp — confirm against smb2pdu.h */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* CIFS Unix extensions capability not advertised */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
};
Steve French5f7fbf72014-12-17 22:52:58 -06005161
/* Protocol constants for an explicit SMB 3.1.1 (vers=3.1.1) mount. */
struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0, /* SMB2+ has no RFC1001-style length preamble */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* NOTE(review): "- 1" presumably drops the one-byte variable-data
	 * placeholder at the end of smb2_read_rsp — confirm against smb2pdu.h */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0, /* CIFS Unix extensions capability not advertised */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
};