blob: c5b1dea54ebcdf5f99827f8d62918685deebd07f [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Aurelien Aptel35adffe2019-09-20 06:29:39 +020013#include <linux/sort.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070014#include <crypto/aead.h>
Christoph Hellwig10c5db22020-05-23 09:30:11 +020015#include <linux/fiemap.h>
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +100016#include "cifsfs.h"
Steve French1080ef72011-02-24 18:07:19 +000017#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040018#include "smb2pdu.h"
19#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040020#include "cifsproto.h"
21#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040022#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070023#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070024#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050025#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070026#include "smbdirect.h"
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -060027#include "fs_context.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040028
Pavel Shilovskyef68e832019-01-18 17:25:36 -080029/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040030static int
31change_conf(struct TCP_Server_Info *server)
32{
33 server->credits += server->echo_credits + server->oplock_credits;
34 server->oplock_credits = server->echo_credits = 0;
35 switch (server->credits) {
36 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080037 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040038 case 1:
39 server->echoes = false;
40 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040041 break;
42 case 2:
43 server->echoes = true;
44 server->oplocks = false;
45 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040046 break;
47 default:
48 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050049 if (enable_oplocks) {
50 server->oplocks = true;
51 server->oplock_credits = 1;
52 } else
53 server->oplocks = false;
54
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040055 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040056 }
57 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080058 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040059}
60
61static void
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080062smb2_add_credits(struct TCP_Server_Info *server,
63 const struct cifs_credits *credits, const int optype)
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040064{
Pavel Shilovskyef68e832019-01-18 17:25:36 -080065 int *val, rc = -1;
Shyam Prasad N6d82c272021-02-03 23:20:46 -080066 int scredits, in_flight;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080067 unsigned int add = credits->value;
68 unsigned int instance = credits->instance;
69 bool reconnect_detected = false;
Shyam Prasad N6d82c272021-02-03 23:20:46 -080070 bool reconnect_with_invalid_credits = false;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080071
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040072 spin_lock(&server->req_lock);
73 val = server->ops->get_credits_field(server, optype);
Steve Frenchb340a4d2018-09-01 01:10:17 -050074
75 /* eg found case where write overlapping reconnect messed up credits */
76 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
Shyam Prasad N6d82c272021-02-03 23:20:46 -080077 reconnect_with_invalid_credits = true;
78
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080079 if ((instance == 0) || (instance == server->reconnect_instance))
80 *val += add;
81 else
82 reconnect_detected = true;
Steve Frenchb340a4d2018-09-01 01:10:17 -050083
Steve French141891f2016-09-23 00:44:16 -050084 if (*val > 65000) {
85 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
Joe Perchesa0a30362020-04-14 22:42:53 -070086 pr_warn_once("server overflowed SMB3 credits\n");
Steve French141891f2016-09-23 00:44:16 -050087 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040088 server->in_flight--;
Shyam Prasad N0f56db82021-02-03 22:49:52 -080089 if (server->in_flight == 0 &&
90 ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
91 ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040092 rc = change_conf(server);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -070093 /*
94 * Sometimes server returns 0 credits on oplock break ack - we need to
95 * rebalance credits in this case.
96 */
97 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
98 server->oplocks) {
99 if (server->credits > 1) {
100 server->credits--;
101 server->oplock_credits++;
102 }
103 }
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800104 scredits = *val;
105 in_flight = server->in_flight;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400106 spin_unlock(&server->req_lock);
107 wake_up(&server->request_q);
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800108
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800109 if (reconnect_detected) {
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800110 trace_smb3_reconnect_detected(server->CurrentMid,
111 server->conn_id, server->hostname, scredits, add, in_flight);
112
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800113 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
114 add, instance);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800115 }
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800116
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800117 if (reconnect_with_invalid_credits) {
118 trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
119 server->conn_id, server->hostname, scredits, add, in_flight);
120 cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
121 optype, scredits, add);
122 }
123
Pavel Shilovsky82e04572019-01-25 10:56:41 -0800124 if (server->tcpStatus == CifsNeedReconnect
125 || server->tcpStatus == CifsExiting)
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800126 return;
127
128 switch (rc) {
129 case -1:
130 /* change_conf hasn't been executed */
131 break;
132 case 0:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000133 cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800134 break;
135 case 1:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000136 cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800137 break;
138 case 2:
139 cifs_dbg(FYI, "disabling oplocks\n");
140 break;
141 default:
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800142 /* change_conf rebalanced credits for different types */
143 break;
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800144 }
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800145
146 trace_smb3_add_credits(server->CurrentMid,
147 server->conn_id, server->hostname, scredits, add, in_flight);
148 cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400149}
150
/*
 * Reset the server credit pool to an absolute value.  Setting it to 1 is
 * the reconnect path, which also bumps reconnect_instance so that stale
 * credits from the previous connection generation can be recognized and
 * dropped by smb2_add_credits().
 */
static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
	int scredits, in_flight;

	spin_lock(&server->req_lock);
	server->credits = val;
	if (val == 1)
		server->reconnect_instance++;
	scredits = server->credits;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);

	trace_smb3_set_credits(server->CurrentMid,
			server->conn_id, server->hostname, scredits, val, in_flight);
	cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);

	/* don't log while holding the lock */
	if (val == 1)
		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}
172
173static int *
174smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
175{
176 switch (optype) {
177 case CIFS_ECHO_OP:
178 return &server->echo_credits;
179 case CIFS_OBREAK_OP:
180 return &server->oplock_credits;
181 default:
182 return &server->credits;
183 }
184}
185
/* Number of credits the server granted in the response for this mid */
static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	return mid->credits_received;
}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400191
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400192static int
193smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800194 unsigned int *num, struct cifs_credits *credits)
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400195{
196 int rc = 0;
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800197 unsigned int scredits, in_flight;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400198
199 spin_lock(&server->req_lock);
200 while (1) {
201 if (server->credits <= 0) {
202 spin_unlock(&server->req_lock);
203 cifs_num_waiters_inc(server);
204 rc = wait_event_killable(server->request_q,
Ronnie Sahlbergb227d212019-03-08 12:58:20 +1000205 has_credits(server, &server->credits, 1));
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400206 cifs_num_waiters_dec(server);
207 if (rc)
208 return rc;
209 spin_lock(&server->req_lock);
210 } else {
211 if (server->tcpStatus == CifsExiting) {
212 spin_unlock(&server->req_lock);
213 return -ENOENT;
214 }
215
216 scredits = server->credits;
217 /* can deadlock with reopen */
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800218 if (scredits <= 8) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400219 *num = SMB2_MAX_BUFFER_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800220 credits->value = 0;
221 credits->instance = 0;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400222 break;
223 }
224
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800225 /* leave some credits for reopen and other ops */
226 scredits -= 8;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400227 *num = min_t(unsigned int, size,
228 scredits * SMB2_MAX_BUFFER_SIZE);
229
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800230 credits->value =
231 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
232 credits->instance = server->reconnect_instance;
233 server->credits -= credits->value;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400234 server->in_flight++;
Steve French1b63f182019-09-09 22:57:11 -0500235 if (server->in_flight > server->max_in_flight)
236 server->max_in_flight = server->in_flight;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400237 break;
238 }
239 }
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800240 scredits = server->credits;
241 in_flight = server->in_flight;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400242 spin_unlock(&server->req_lock);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800243
244 trace_smb3_add_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800245 server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800246 cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
247 __func__, credits->value, scredits);
248
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400249 return rc;
250}
251
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800252static int
253smb2_adjust_credits(struct TCP_Server_Info *server,
254 struct cifs_credits *credits,
255 const unsigned int payload_size)
256{
257 int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800258 int scredits, in_flight;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800259
260 if (!credits->value || credits->value == new_val)
261 return 0;
262
263 if (credits->value < new_val) {
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800264 trace_smb3_too_many_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800265 server->conn_id, server->hostname, 0, credits->value - new_val, 0);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800266 cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
267 credits->value, new_val);
268
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800269 return -ENOTSUPP;
270 }
271
272 spin_lock(&server->req_lock);
273
274 if (server->reconnect_instance != credits->instance) {
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800275 scredits = server->credits;
276 in_flight = server->in_flight;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800277 spin_unlock(&server->req_lock);
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800278
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800279 trace_smb3_reconnect_detected(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800280 server->conn_id, server->hostname, scredits,
281 credits->value - new_val, in_flight);
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000282 cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800283 credits->value - new_val);
284 return -EAGAIN;
285 }
286
287 server->credits += credits->value - new_val;
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800288 scredits = server->credits;
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800289 in_flight = server->in_flight;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800290 spin_unlock(&server->req_lock);
291 wake_up(&server->request_q);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800292
293 trace_smb3_add_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800294 server->conn_id, server->hostname, scredits,
295 credits->value - new_val, in_flight);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800296 cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
297 __func__, credits->value - new_val, scredits);
298
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800299 credits->value = new_val;
300
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800301 return 0;
302}
303
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400304static __u64
305smb2_get_next_mid(struct TCP_Server_Info *server)
306{
307 __u64 mid;
308 /* for SMB2 we need the current value */
309 spin_lock(&GlobalMid_Lock);
310 mid = server->CurrentMid++;
311 spin_unlock(&GlobalMid_Lock);
312 return mid;
313}
Steve French1080ef72011-02-24 18:07:19 +0000314
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800315static void
316smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
317{
318 spin_lock(&GlobalMid_Lock);
319 if (server->CurrentMid >= val)
320 server->CurrentMid -= val;
321 spin_unlock(&GlobalMid_Lock);
322}
323
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400324static struct mid_q_entry *
Rohith Surabattulaac873aa2020-10-29 05:03:10 +0000325__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400326{
327 struct mid_q_entry *mid;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +0900328 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700329 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400330
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700331 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000332 cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
Steve French373512e2015-12-18 13:05:30 -0600333 return NULL;
334 }
335
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400336 spin_lock(&GlobalMid_Lock);
337 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
Sachin Prabhu9235d092014-12-09 17:37:00 +0000338 if ((mid->mid == wire_mid) &&
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400339 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700340 (mid->command == shdr->Command)) {
Lars Persson696e4202018-06-25 14:05:25 +0200341 kref_get(&mid->refcount);
Rohith Surabattulaac873aa2020-10-29 05:03:10 +0000342 if (dequeue) {
343 list_del_init(&mid->qhead);
344 mid->mid_flags |= MID_DELETED;
345 }
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400346 spin_unlock(&GlobalMid_Lock);
347 return mid;
348 }
349 }
350 spin_unlock(&GlobalMid_Lock);
351 return NULL;
352}
353
Rohith Surabattulaac873aa2020-10-29 05:03:10 +0000354static struct mid_q_entry *
355smb2_find_mid(struct TCP_Server_Info *server, char *buf)
356{
357 return __smb2_find_mid(server, buf, false);
358}
359
/* Find the pending mid for @buf and atomically remove it from the queue */
static struct mid_q_entry *
smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
{
	return __smb2_find_mid(server, buf, true);
}
365
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400366static void
Ronnie Sahlberg14547f72018-04-22 14:45:53 -0600367smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400368{
369#ifdef CONFIG_CIFS_DEBUG2
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +0900370 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400371
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000372 cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700373 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +0900374 shdr->Id.SyncId.ProcessId);
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000375 cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
Steve French71992e622018-05-06 15:58:51 -0500376 server->ops->calc_smb_size(buf, server));
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400377#endif
378}
379
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400380static bool
381smb2_need_neg(struct TCP_Server_Info *server)
382{
383 return server->max_read == 0;
384}
385
/*
 * Run the SMB2 NEGOTIATE exchange for @ses, resetting the mid counter
 * first.  -EAGAIN from the wire is mapped to -EHOSTDOWN so the mount
 * path does not retry negotiation indefinitely.
 */
static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;

	spin_lock(&GlobalMid_Lock);
	cifs_ses_server(ses)->CurrentMid = 0;
	spin_unlock(&GlobalMid_Lock);
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}
400
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700401static unsigned int
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600402smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700403{
404 struct TCP_Server_Info *server = tcon->ses->server;
405 unsigned int wsize;
406
407 /* start with specified wsize, or default */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600408 wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700409 wsize = min_t(unsigned int, wsize, server->max_write);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400410 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
411 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700412
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700413 return wsize;
414}
415
/*
 * Compute the effective write size for an SMB3 mount.  Like the SMB2
 * variant but additionally clamped by the SMB Direct (RDMA) connection
 * limits when in use; signed connections must also leave room for the
 * transfer packet header and a possible encryption header.
 */
static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			wsize = min_t(unsigned int,
				wsize,
				server->smbd_conn->max_fragmented_send_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
447
/*
 * Compute the effective read size for an SMB2 mount: the user-requested
 * (or default) rsize, clamped by the server's advertised max_read and,
 * without large-MTU support, by the single-credit buffer size.
 */
static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
463
Steve French3d621232018-09-25 15:33:47 -0500464static unsigned int
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600465smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
Steve French3d621232018-09-25 15:33:47 -0500466{
467 struct TCP_Server_Info *server = tcon->ses->server;
468 unsigned int rsize;
469
470 /* start with specified rsize, or default */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600471 rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
Steve French3d621232018-09-25 15:33:47 -0500472 rsize = min_t(unsigned int, rsize, server->max_read);
473#ifdef CONFIG_CIFS_SMB_DIRECT
474 if (server->rdma) {
475 if (server->sign)
Long Lif7950cb2020-03-26 19:42:24 -0700476 /*
477 * Account for SMB2 data transfer packet header and
478 * possible encryption header
479 */
Steve French3d621232018-09-25 15:33:47 -0500480 rsize = min_t(unsigned int,
Long Lif7950cb2020-03-26 19:42:24 -0700481 rsize,
482 server->smbd_conn->max_fragmented_recv_size -
483 SMB2_READWRITE_PDU_HEADER_SIZE -
484 sizeof(struct smb2_transform_hdr));
Steve French3d621232018-09-25 15:33:47 -0500485 else
486 rsize = min_t(unsigned int,
487 rsize, server->smbd_conn->max_readwrite_size);
488 }
489#endif
490
491 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
492 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
493
494 return rsize;
495}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200496
497static int
498parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
499 size_t buf_len,
500 struct cifs_server_iface **iface_list,
501 size_t *iface_count)
502{
503 struct network_interface_info_ioctl_rsp *p;
504 struct sockaddr_in *addr4;
505 struct sockaddr_in6 *addr6;
506 struct iface_info_ipv4 *p4;
507 struct iface_info_ipv6 *p6;
508 struct cifs_server_iface *info;
509 ssize_t bytes_left;
510 size_t next = 0;
511 int nb_iface = 0;
512 int rc = 0;
513
514 *iface_list = NULL;
515 *iface_count = 0;
516
517 /*
518 * Fist pass: count and sanity check
519 */
520
521 bytes_left = buf_len;
522 p = buf;
523 while (bytes_left >= sizeof(*p)) {
524 nb_iface++;
525 next = le32_to_cpu(p->Next);
526 if (!next) {
527 bytes_left -= sizeof(*p);
528 break;
529 }
530 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
531 bytes_left -= next;
532 }
533
534 if (!nb_iface) {
535 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
536 rc = -EINVAL;
537 goto out;
538 }
539
Steve Frenchebcd6de2020-12-08 21:13:31 -0600540 /* Azure rounds the buffer size up 8, to a 16 byte boundary */
541 if ((bytes_left > 8) || p->Next)
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200542 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
543
544
545 /*
546 * Second pass: extract info to internal structure
547 */
548
549 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
550 if (!*iface_list) {
551 rc = -ENOMEM;
552 goto out;
553 }
554
555 info = *iface_list;
556 bytes_left = buf_len;
557 p = buf;
558 while (bytes_left >= sizeof(*p)) {
559 info->speed = le64_to_cpu(p->LinkSpeed);
Hyunchul Leec9c9c682021-07-12 19:34:02 +0900560 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
561 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200562
563 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
564 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
565 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
566 le32_to_cpu(p->Capability));
567
568 switch (p->Family) {
569 /*
570 * The kernel and wire socket structures have the same
571 * layout and use network byte order but make the
572 * conversion explicit in case either one changes.
573 */
574 case INTERNETWORK:
575 addr4 = (struct sockaddr_in *)&info->sockaddr;
576 p4 = (struct iface_info_ipv4 *)p->Buffer;
577 addr4->sin_family = AF_INET;
578 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
579
580 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
581 addr4->sin_port = cpu_to_be16(CIFS_PORT);
582
583 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
584 &addr4->sin_addr);
585 break;
586 case INTERNETWORKV6:
587 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
588 p6 = (struct iface_info_ipv6 *)p->Buffer;
589 addr6->sin6_family = AF_INET6;
590 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
591
592 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
593 addr6->sin6_flowinfo = 0;
594 addr6->sin6_scope_id = 0;
595 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
596
597 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
598 &addr6->sin6_addr);
599 break;
600 default:
601 cifs_dbg(VFS,
602 "%s: skipping unsupported socket family\n",
603 __func__);
604 goto next_iface;
605 }
606
607 (*iface_count)++;
608 info++;
609next_iface:
610 next = le32_to_cpu(p->Next);
611 if (!next)
612 break;
613 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
614 bytes_left -= next;
615 }
616
617 if (!*iface_count) {
618 rc = -EINVAL;
619 goto out;
620 }
621
622out:
623 if (rc) {
624 kfree(*iface_list);
625 *iface_count = 0;
626 *iface_list = NULL;
627 }
628 return rc;
629}
630
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200631static int compare_iface(const void *ia, const void *ib)
632{
633 const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
634 const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
635
636 return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
637}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200638
Steve Frenchc481e9f2013-10-14 01:21:53 -0500639static int
640SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
641{
642 int rc;
643 unsigned int ret_data_len = 0;
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200644 struct network_interface_info_ioctl_rsp *out_buf = NULL;
645 struct cifs_server_iface *iface_list;
646 size_t iface_count;
647 struct cifs_ses *ses = tcon->ses;
Steve Frenchc481e9f2013-10-14 01:21:53 -0500648
649 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
650 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
651 NULL /* no data input */, 0 /* no data input */,
Steve French153322f2019-03-28 22:32:49 -0500652 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
Steve Frenchc3ed4402018-06-28 22:53:39 -0500653 if (rc == -EOPNOTSUPP) {
654 cifs_dbg(FYI,
655 "server does not support query network interfaces\n");
656 goto out;
657 } else if (rc != 0) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000658 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200659 goto out;
Steve French9ffc5412014-10-16 15:13:14 -0500660 }
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200661
662 rc = parse_server_interfaces(out_buf, ret_data_len,
663 &iface_list, &iface_count);
664 if (rc)
665 goto out;
666
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200667 /* sort interfaces from fastest to slowest */
668 sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);
669
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200670 spin_lock(&ses->iface_lock);
671 kfree(ses->iface_list);
672 ses->iface_list = iface_list;
673 ses->iface_count = iface_count;
674 ses->iface_last_update = jiffies;
675 spin_unlock(&ses->iface_lock);
676
677out:
Steve French24df1482016-09-29 04:20:23 -0500678 kfree(out_buf);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500679 return rc;
680}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500681
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000682static void
683smb2_close_cached_fid(struct kref *ref)
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000684{
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000685 struct cached_fid *cfid = container_of(ref, struct cached_fid,
686 refcount);
687
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000688 if (cfid->is_valid) {
689 cifs_dbg(FYI, "clear cached root file handle\n");
690 SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
691 cfid->fid->volatile_fid);
Enzo Matsumiya93515902021-09-09 18:46:45 -0300692 }
693
694 /*
695 * We only check validity above to send SMB2_close,
696 * but we still need to invalidate these entries
697 * when this function is called
698 */
699 cfid->is_valid = false;
700 cfid->file_all_info_is_valid = false;
701 cfid->has_lease = false;
702 if (cfid->dentry) {
703 dput(cfid->dentry);
704 cfid->dentry = NULL;
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000705 }
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000706}
707
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000708void close_cached_dir(struct cached_fid *cfid)
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000709{
710 mutex_lock(&cfid->fid_mutex);
711 kref_put(&cfid->refcount, smb2_close_cached_fid);
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000712 mutex_unlock(&cfid->fid_mutex);
713}
714
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000715void close_cached_dir_lease_locked(struct cached_fid *cfid)
Pavel Shilovskyd9191312019-12-10 11:44:52 -0800716{
717 if (cfid->has_lease) {
718 cfid->has_lease = false;
719 kref_put(&cfid->refcount, smb2_close_cached_fid);
720 }
721}
722
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000723void close_cached_dir_lease(struct cached_fid *cfid)
Pavel Shilovskyd9191312019-12-10 11:44:52 -0800724{
725 mutex_lock(&cfid->fid_mutex);
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000726 close_cached_dir_lease_locked(cfid);
Pavel Shilovskyd9191312019-12-10 11:44:52 -0800727 mutex_unlock(&cfid->fid_mutex);
728}
729
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000730void
731smb2_cached_lease_break(struct work_struct *work)
732{
733 struct cached_fid *cfid = container_of(work,
734 struct cached_fid, lease_break);
735
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000736 close_cached_dir_lease(cfid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000737}
738
/*
 * Open and cache a directory handle.
 * Only supported for the root handle ("" path).
 *
 * On success *cfid points at tcon->crfid with its refcount bumped;
 * the caller must drop it with close_cached_dir().  The open and the
 * FILE_ALL_INFORMATION query are sent as one compound request.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    struct cached_fid **cfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry;

	if (tcon->nohandlecache)
		return -ENOTSUPP;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	/* only the root of the share may be cached */
	if (strlen(path))
		return -ENOENT;

	dentry = cifs_sb->root;

	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		/* fast path: hand out a new reference to the cached handle */
		cifs_dbg(FYI, "found a cached root file handle\n");
		*cfid = &tcon->crfid;
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (!server->ops->new_lease_key)
		return -EIO;

	pfid = tcon->crfid.fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* second op in the compound: query all info on the new handle */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set the fid in crfid to valid
		 * cached root, so increment the refcount.
		 */
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		rc = 0;
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	/* record the server-assigned handle in the cached fid */
	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = le64_to_cpu(o_rsp->PersistentFileId);
	oparms.fid->volatile_fid = le64_to_cpu(o_rsp->VolatileFileId);
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	tcon->crfid.dentry = dentry;
	dget(dentry);
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/*
		 * See commit 2f94a3125b87. Increment the refcount when we
		 * get a lease for root, release it if lease break occurs
		 */
		kref_get(&tcon->crfid.refcount);
		tcon->crfid.has_lease = true;
		smb2_parse_contexts(server, o_rsp,
				&oparms.fid->epoch,
				    oparms.fid->lease_key, &oplock,
				    NULL, NULL);
	} else
		goto oshr_exit;

	/* cache the FILE_ALL_INFORMATION result if the response is sane */
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = true;
	tcon->crfid.time = jiffies;


oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	if (rc == 0)
		*cfid = &tcon->crfid;
	return rc;
}
938
Ronnie Sahlberg6ef4e9c2021-03-09 09:07:32 +1000939int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
940 struct dentry *dentry,
941 struct cached_fid **cfid)
942{
943 mutex_lock(&tcon->crfid.fid_mutex);
944 if (tcon->crfid.dentry == dentry) {
945 cifs_dbg(FYI, "found a cached root file handle by dentry\n");
946 *cfid = &tcon->crfid;
947 kref_get(&tcon->crfid.refcount);
948 mutex_unlock(&tcon->crfid.fid_mutex);
949 return 0;
950 }
951 mutex_unlock(&tcon->crfid.fid_mutex);
952 return -ENOENT;
953}
954
Steve French34f62642013-10-09 02:07:00 -0500955static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200956smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
957 struct cifs_sb_info *cifs_sb)
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500958{
959 int rc;
960 __le16 srch_path = 0; /* Null - open root of share */
961 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
962 struct cifs_open_parms oparms;
963 struct cifs_fid fid;
Ronnie Sahlberg9e81e8f2020-10-05 12:37:52 +1000964 struct cached_fid *cfid = NULL;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500965
966 oparms.tcon = tcon;
967 oparms.desired_access = FILE_READ_ATTRIBUTES;
968 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200969 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500970 oparms.fid = &fid;
971 oparms.reconnect = false;
972
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000973 rc = open_cached_dir(xid, tcon, "", cifs_sb, &cfid);
Ronnie Sahlberg4df3d972021-03-09 09:07:27 +1000974 if (rc == 0)
975 memcpy(&fid, cfid->fid, sizeof(struct cifs_fid));
976 else
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000977 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
Aurelien Aptel69dda302020-03-02 17:53:22 +0100978 NULL, NULL);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500979 if (rc)
980 return;
981
Steve Frenchc481e9f2013-10-14 01:21:53 -0500982 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500983
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500984 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
985 FS_ATTRIBUTE_INFORMATION);
986 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
987 FS_DEVICE_INFORMATION);
988 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500989 FS_VOLUME_INFORMATION);
990 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500991 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Ronnie Sahlberg4df3d972021-03-09 09:07:27 +1000992 if (cfid == NULL)
Steve French3d4ef9a2018-04-25 22:19:09 -0500993 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000994 else
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000995 close_cached_dir(cfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500996}
997
998static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200999smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
1000 struct cifs_sb_info *cifs_sb)
Steve French34f62642013-10-09 02:07:00 -05001001{
1002 int rc;
1003 __le16 srch_path = 0; /* Null - open root of share */
1004 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1005 struct cifs_open_parms oparms;
1006 struct cifs_fid fid;
1007
1008 oparms.tcon = tcon;
1009 oparms.desired_access = FILE_READ_ATTRIBUTES;
1010 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001011 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French34f62642013-10-09 02:07:00 -05001012 oparms.fid = &fid;
1013 oparms.reconnect = false;
1014
Aurelien Aptel69dda302020-03-02 17:53:22 +01001015 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
1016 NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -05001017 if (rc)
1018 return;
1019
Steven French21671142013-10-09 13:36:35 -05001020 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1021 FS_ATTRIBUTE_INFORMATION);
1022 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1023 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -05001024 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -05001025}
1026
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001027static int
1028smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
1029 struct cifs_sb_info *cifs_sb, const char *full_path)
1030{
1031 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001032 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001033 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001034 struct cifs_open_parms oparms;
1035 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001036
Ronnie Sahlberga93864d2018-06-14 06:48:35 +10001037 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -05001038 return 0;
1039
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001040 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
1041 if (!utf16_path)
1042 return -ENOMEM;
1043
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001044 oparms.tcon = tcon;
1045 oparms.desired_access = FILE_READ_ATTRIBUTES;
1046 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001047 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001048 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001049 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001050
Aurelien Aptel69dda302020-03-02 17:53:22 +01001051 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
1052 NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001053 if (rc) {
1054 kfree(utf16_path);
1055 return rc;
1056 }
1057
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001058 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001059 kfree(utf16_path);
1060 return rc;
1061}
1062
/*
 * Return the server-assigned unique id (used as the inode number) for
 * a file.  The id was already fetched into @data by an earlier query,
 * so no network round trip is needed; xid/tcon/path are unused here.
 */
static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}
1071
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07001072static int
1073smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
1074 struct cifs_fid *fid, FILE_ALL_INFO *data)
1075{
1076 int rc;
1077 struct smb2_file_all_info *smb2_data;
1078
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04001079 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07001080 GFP_KERNEL);
1081 if (smb2_data == NULL)
1082 return -ENOMEM;
1083
1084 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
1085 smb2_data);
1086 if (!rc)
1087 move_smb2_info_to_cifs(data, smb2_data);
1088 kfree(smb2_data);
1089 return rc;
1090}
1091
#ifdef CONFIG_CIFS_XATTR
/*
 * Walk an SMB2 FILE_FULL_EA_INFORMATION list in @src (@src_size bytes)
 * and convert it to the cifs xattr representation.
 *
 * If @ea_name is set: copy that one attribute's value into @dst and
 * return its length (or just the length when @dst_size is 0);
 * -ENODATA if not found, -ERANGE if @dst is too small.
 * If @ea_name is NULL (listxattr): emit "user.<name>\0" entries into
 * @dst and return the total bytes needed/used.
 * Returns -EIO when the list on the wire is malformed.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;	/* original size: 0 means "size query" */
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		/* 8 = fixed header; +1 for the NUL between name and value */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			/* getxattr: look for the one requested attribute */
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1175
/*
 * Fetch the extended attributes of @path via a compound
 * open/query/close, then convert them with move_smb2_ea_to_cifs().
 * @ea_name selects one attribute (getxattr); NULL lists them all
 * (listxattr).  Returns bytes copied/needed or a negative errno.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	/* validate the returned buffer before parsing the EA list */
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
			le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1230

/*
 * Set (or, with a NULL @ea_value, delete) one extended attribute on
 * @path using a single compound open/set-info/close request.
 *
 * Before adding an attribute, the existing EA list size is queried so
 * we never store more EAs than can later be read back in one response
 * (returns -ENOSPC in that case).  Returns 0 or a negative errno.
 */
static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *path, const char *ea_name, const void *ea_value,
	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
	    struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	__le16 *utf16_path = NULL;
	int ea_name_len = strlen(ea_name);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	int len;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	unsigned int size[1];
	void *data[1];
	struct smb2_file_full_ea_info *ea = NULL;
	struct kvec close_iov[1];
	struct smb2_query_info_rsp *rsp;
	int rc, used_len = 0;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	/* EA names are limited to 255 bytes on the wire */
	if (ea_name_len > 255)
		return -EINVAL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (ses->server->ops->query_all_EAs) {
		if (!ea_value) {
			/* deleting: nothing to do if it does not exist */
			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
							     ea_name, NULL, 0,
							     cifs_sb);
			if (rc == -ENODATA)
				goto sea_exit;
		} else {
			/* If we are adding a attribute we should first check
			 * if there will be enough space available to store
			 * the new EA. If not we should not add it since we
			 * would not be able to even read the EAs back.
			 */
			rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov[1], &resp_buftype[1], cifs_sb);
			if (rc == 0) {
				rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
				used_len = le32_to_cpu(rsp->OutputBufferLength);
			}
			/* rsp_iov[1] is reused below for the compound send */
			free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
			resp_buftype[1] = CIFS_NO_BUFFER;
			memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
			rc = 0;

			/* Use a fudge factor of 256 bytes in case we collide
			 * with a different set_EAs command.
			 */
			if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
			   MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
			   used_len + ea_name_len + ea_value_len + 1) {
				rc = -ENOSPC;
				goto sea_exit;
			}
		}
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_WRITE_EA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* Set Info */
	memset(&si_iov, 0, sizeof(si_iov));
	rqst[1].rq_iov = si_iov;
	rqst[1].rq_nvec = 1;

	/* +1 for the NUL separating the EA name from its value */
	len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
	ea = kzalloc(len, GFP_KERNEL);
	if (ea == NULL) {
		rc = -ENOMEM;
		goto sea_exit;
	}

	ea->ea_name_length = ea_name_len;
	ea->ea_value_length = cpu_to_le16(ea_value_len);
	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

	size[0] = len;
	data[0] = ea;

	rc = SMB2_set_info_init(tcon, server,
				&rqst[1], COMPOUND_FID,
				COMPOUND_FID, current->tgid,
				FILE_FULL_EA_INFORMATION,
				SMB2_O_INFO_FILE, 0, data, size);
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;
	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	/* no need to bump num_remote_opens because handle immediately closed */

 sea_exit:
	kfree(ea);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_set_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001389
/*
 * Report whether echo (keepalive) requests may currently be sent on
 * this server connection.
 */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1395
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001396static void
1397smb2_clear_stats(struct cifs_tcon *tcon)
1398{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001399 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001400
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001401 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1402 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1403 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1404 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001405}
1406
/*
 * Emit the share's capability bits, sector-size flags, share flags,
 * tid and maximal access into a /proc seq_file (used by the cifs
 * DebugData output).
 */
static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	/* ss_flags come from FS_SECTOR_SIZE_INFORMATION (SMB3) */
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}
1439
1440static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001441smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1442{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001443 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1444 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001445
1446 /*
1447 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1448 * totals (requests sent) since those SMBs are per-session not per tcon
1449 */
Steve French52ce1ac2018-07-31 01:46:47 -05001450 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1451 (long long)(tcon->bytes_read),
1452 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001453 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1454 atomic_read(&tcon->num_local_opens),
1455 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001456 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001457 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1458 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001459 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001460 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1461 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001462 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001463 atomic_read(&sent[SMB2_CREATE_HE]),
1464 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001465 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001466 atomic_read(&sent[SMB2_CLOSE_HE]),
1467 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001468 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001469 atomic_read(&sent[SMB2_FLUSH_HE]),
1470 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001471 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001472 atomic_read(&sent[SMB2_READ_HE]),
1473 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001474 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001475 atomic_read(&sent[SMB2_WRITE_HE]),
1476 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001477 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001478 atomic_read(&sent[SMB2_LOCK_HE]),
1479 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001480 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001481 atomic_read(&sent[SMB2_IOCTL_HE]),
1482 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001483 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001484 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1485 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001486 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001487 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1488 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001489 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001490 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1491 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001492 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001493 atomic_read(&sent[SMB2_SET_INFO_HE]),
1494 atomic_read(&failed[SMB2_SET_INFO_HE]));
1495 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1496 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1497 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001498}
1499
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001500static void
1501smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1502{
David Howells2b0143b2015-03-17 22:25:59 +00001503 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04001504 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1505
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001506 cfile->fid.persistent_fid = fid->persistent_fid;
1507 cfile->fid.volatile_fid = fid->volatile_fid;
Aurelien Aptel86f740f2020-02-21 11:19:06 +01001508 cfile->fid.access = fid->access;
Steve Frenchdfe33f92018-10-30 19:50:31 -05001509#ifdef CONFIG_CIFS_DEBUG2
1510 cfile->fid.mid = fid->mid;
1511#endif /* CIFS_DEBUG2 */
Pavel Shilovsky42873b02013-09-05 21:30:16 +04001512 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1513 &fid->purge_cache);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001514 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
Aurelien Aptel94f87372016-09-22 07:38:50 +02001515 memcpy(cfile->fid.create_guid, fid->create_guid, 16);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001516}
1517
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001518static void
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001519smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1520 struct cifs_fid *fid)
1521{
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001522 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001523}
1524
Steve French43f8a6a2019-12-02 21:46:54 -06001525static void
1526smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
1527 struct cifsFileInfo *cfile)
1528{
1529 struct smb2_file_network_open_info file_inf;
1530 struct inode *inode;
1531 int rc;
1532
1533 rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
1534 cfile->fid.volatile_fid, &file_inf);
1535 if (rc)
1536 return;
1537
1538 inode = d_inode(cfile->dentry);
1539
1540 spin_lock(&inode->i_lock);
1541 CIFS_I(inode)->time = jiffies;
1542
1543 /* Creation time should not need to be updated on close */
1544 if (file_inf.LastWriteTime)
1545 inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
1546 if (file_inf.ChangeTime)
1547 inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
1548 if (file_inf.LastAccessTime)
1549 inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
1550
1551 /*
1552 * i_blocks is not related to (i_size / i_blksize),
1553 * but instead 512 byte (2**9) size is required for
1554 * calculating num blocks.
1555 */
1556 if (le64_to_cpu(file_inf.AllocationSize) > 4096)
1557 inode->i_blocks =
1558 (512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
1559
1560 /* End of file and Attributes should not have to be updated on close */
1561 spin_unlock(&inode->i_lock);
1562}
1563
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001564static int
Steve French41c13582013-11-14 00:05:36 -06001565SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1566 u64 persistent_fid, u64 volatile_fid,
1567 struct copychunk_ioctl *pcchunk)
1568{
1569 int rc;
1570 unsigned int ret_data_len;
1571 struct resume_key_req *res_key;
1572
1573 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1574 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05001575 NULL, 0 /* no input */, CIFSMaxBufSize,
Steve French41c13582013-11-14 00:05:36 -06001576 (char **)&res_key, &ret_data_len);
1577
Steve French423333b2021-04-19 23:22:37 -05001578 if (rc == -EOPNOTSUPP) {
1579 pr_warn_once("Server share %s does not support copy range\n", tcon->treeName);
1580 goto req_res_key_exit;
1581 } else if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001582 cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
Steve French41c13582013-11-14 00:05:36 -06001583 goto req_res_key_exit;
1584 }
1585 if (ret_data_len < sizeof(struct resume_key_req)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10001586 cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
Steve French41c13582013-11-14 00:05:36 -06001587 rc = -EINVAL;
1588 goto req_res_key_exit;
1589 }
1590 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1591
1592req_res_key_exit:
1593 kfree(res_key);
1594 return rc;
1595}
1596
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001597struct iqi_vars {
1598 struct smb_rqst rqst[3];
1599 struct kvec rsp_iov[3];
1600 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1601 struct kvec qi_iov[1];
1602 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
1603 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1604 struct kvec close_iov[1];
1605};
1606
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001607static int
1608smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001609 struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02001610 struct cifs_sb_info *cifs_sb,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001611 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001612 unsigned long p)
1613{
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001614 struct iqi_vars *vars;
1615 struct smb_rqst *rqst;
1616 struct kvec *rsp_iov;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001617 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001618 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001619 char __user *arg = (char __user *)p;
1620 struct smb_query_info qi;
1621 struct smb_query_info __user *pqi;
1622 int rc = 0;
Paulo Alcantara04ad69c2021-03-08 12:00:50 -03001623 int flags = CIFS_CP_CREATE_CLOSE_OP;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001624 struct smb2_query_info_rsp *qi_rsp = NULL;
1625 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001626 void *buffer = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001627 int resp_buftype[3];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001628 struct cifs_open_parms oparms;
1629 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1630 struct cifs_fid fid;
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001631 unsigned int size[2];
1632 void *data[2];
Amir Goldstein0f060932020-02-03 21:46:43 +02001633 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001634
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001635 vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
1636 if (vars == NULL)
1637 return -ENOMEM;
1638 rqst = &vars->rqst[0];
1639 rsp_iov = &vars->rsp_iov[0];
1640
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001641 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001642
1643 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001644 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001645
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001646 if (qi.output_buffer_length > 1024) {
1647 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001648 return -EINVAL;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001649 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001650
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001651 if (!ses || !server) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001652 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001653 return -EIO;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001654 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001655
1656 if (smb3_encryption_required(tcon))
1657 flags |= CIFS_TRANSFORM_REQ;
1658
Markus Elfringcfaa1182019-11-05 21:30:25 +01001659 buffer = memdup_user(arg + sizeof(struct smb_query_info),
1660 qi.output_buffer_length);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001661 if (IS_ERR(buffer)) {
1662 kfree(vars);
Markus Elfringcfaa1182019-11-05 21:30:25 +01001663 return PTR_ERR(buffer);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001664 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001665
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001666 /* Open */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001667 rqst[0].rq_iov = &vars->open_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001668 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001669
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001670 memset(&oparms, 0, sizeof(oparms));
1671 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001672 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001673 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001674 oparms.fid = &fid;
1675 oparms.reconnect = false;
1676
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001677 if (qi.flags & PASSTHRU_FSCTL) {
1678 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1679 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1680 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001681 break;
1682 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1683 oparms.desired_access = GENERIC_ALL;
1684 break;
1685 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1686 oparms.desired_access = GENERIC_READ;
1687 break;
1688 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1689 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001690 break;
1691 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001692 } else if (qi.flags & PASSTHRU_SET_INFO) {
1693 oparms.desired_access = GENERIC_WRITE;
1694 } else {
1695 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001696 }
1697
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001698 rc = SMB2_open_init(tcon, server,
1699 &rqst[0], &oplock, &oparms, path);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001700 if (rc)
1701 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001702 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001703
1704 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001705 if (qi.flags & PASSTHRU_FSCTL) {
1706 /* Can eventually relax perm check since server enforces too */
1707 if (!capable(CAP_SYS_ADMIN))
1708 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001709 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001710 rqst[1].rq_iov = &vars->io_iov[0];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001711 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1712
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001713 rc = SMB2_ioctl_init(tcon, server,
1714 &rqst[1],
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001715 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001716 qi.info_type, true, buffer,
1717 qi.output_buffer_length,
Ronnie Sahlberg731b82b2020-01-08 13:08:07 +10001718 CIFSMaxBufSize -
1719 MAX_SMB2_CREATE_RESPONSE_SIZE -
1720 MAX_SMB2_CLOSE_RESPONSE_SIZE);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001721 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001722 } else if (qi.flags == PASSTHRU_SET_INFO) {
1723 /* Can eventually relax perm check since server enforces too */
1724 if (!capable(CAP_SYS_ADMIN))
1725 rc = -EPERM;
1726 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001727 rqst[1].rq_iov = &vars->si_iov[0];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001728 rqst[1].rq_nvec = 1;
1729
1730 size[0] = 8;
1731 data[0] = buffer;
1732
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001733 rc = SMB2_set_info_init(tcon, server,
1734 &rqst[1],
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001735 COMPOUND_FID, COMPOUND_FID,
1736 current->tgid,
1737 FILE_END_OF_FILE_INFORMATION,
1738 SMB2_O_INFO_FILE, 0, data, size);
1739 }
Steve French31ba4332019-03-13 02:40:07 -05001740 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001741 rqst[1].rq_iov = &vars->qi_iov[0];
Steve French31ba4332019-03-13 02:40:07 -05001742 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001743
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001744 rc = SMB2_query_info_init(tcon, server,
1745 &rqst[1], COMPOUND_FID,
Steve French31ba4332019-03-13 02:40:07 -05001746 COMPOUND_FID, qi.file_info_class,
1747 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001748 qi.input_buffer_length,
1749 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001750 } else { /* unknown flags */
Joe Perchesa0a30362020-04-14 22:42:53 -07001751 cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
1752 qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001753 rc = -EINVAL;
1754 }
1755
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001756 if (rc)
1757 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001758 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001759 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001760
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001761 /* Close */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001762 rqst[2].rq_iov = &vars->close_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001763 rqst[2].rq_nvec = 1;
1764
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001765 rc = SMB2_close_init(tcon, server,
1766 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001767 if (rc)
1768 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001769 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001770
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001771 rc = compound_send_recv(xid, ses, server,
1772 flags, 3, rqst,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001773 resp_buftype, rsp_iov);
1774 if (rc)
1775 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001776
1777 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001778 if (qi.flags & PASSTHRU_FSCTL) {
1779 pqi = (struct smb_query_info __user *)arg;
1780 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1781 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1782 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001783 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001784 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1785 > rsp_iov[1].iov_len)
1786 goto e_fault;
1787
1788 if (copy_to_user(&pqi->input_buffer_length,
1789 &qi.input_buffer_length,
1790 sizeof(qi.input_buffer_length)))
1791 goto e_fault;
1792
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001793 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1794 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001795 qi.input_buffer_length))
1796 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001797 } else {
1798 pqi = (struct smb_query_info __user *)arg;
1799 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1800 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1801 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001802 if (copy_to_user(&pqi->input_buffer_length,
1803 &qi.input_buffer_length,
1804 sizeof(qi.input_buffer_length)))
1805 goto e_fault;
1806
1807 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1808 qi.input_buffer_length))
1809 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001810 }
1811
1812 iqinf_exit:
Aurelien Aptelccd48ec2021-04-09 15:47:01 +02001813 cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
1814 cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
1815 cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001816 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1817 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1818 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Aurelien Aptelccd48ec2021-04-09 15:47:01 +02001819 kfree(vars);
1820 kfree(buffer);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001821 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001822
1823e_fault:
1824 rc = -EFAULT;
1825 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001826}
1827
Sachin Prabhu620d8742017-02-10 16:03:51 +05301828static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001829smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001830 struct cifsFileInfo *srcfile,
1831 struct cifsFileInfo *trgtfile, u64 src_off,
1832 u64 len, u64 dest_off)
1833{
1834 int rc;
1835 unsigned int ret_data_len;
1836 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001837 struct copychunk_ioctl_rsp *retbuf = NULL;
1838 struct cifs_tcon *tcon;
1839 int chunks_copied = 0;
1840 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301841 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001842
1843 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1844
1845 if (pcchunk == NULL)
1846 return -ENOMEM;
1847
Christoph Probsta205d502019-05-08 21:36:25 +02001848 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001849 /* Request a key from the server to identify the source of the copy */
1850 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1851 srcfile->fid.persistent_fid,
1852 srcfile->fid.volatile_fid, pcchunk);
1853
1854 /* Note: request_res_key sets res_key null only if rc !=0 */
1855 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001856 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001857
1858 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001859 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001860 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001861 pcchunk->Reserved2 = 0;
1862
Steve French9bf0c9c2013-11-16 18:05:28 -06001863 tcon = tlink_tcon(trgtfile->tlink);
1864
1865 while (len > 0) {
1866 pcchunk->SourceOffset = cpu_to_le64(src_off);
1867 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1868 pcchunk->Length =
1869 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1870
1871 /* Request server copy to target from src identified by key */
Ronnie Sahlbergd201d762021-05-19 08:40:11 +10001872 kfree(retbuf);
1873 retbuf = NULL;
Steve French9bf0c9c2013-11-16 18:05:28 -06001874 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001875 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001876 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001877 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1878 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001879 if (rc == 0) {
1880 if (ret_data_len !=
1881 sizeof(struct copychunk_ioctl_rsp)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001882 cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001883 rc = -EIO;
1884 goto cchunk_out;
1885 }
1886 if (retbuf->TotalBytesWritten == 0) {
1887 cifs_dbg(FYI, "no bytes copied\n");
1888 rc = -EIO;
1889 goto cchunk_out;
1890 }
1891 /*
1892 * Check if server claimed to write more than we asked
1893 */
1894 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1895 le32_to_cpu(pcchunk->Length)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001896 cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001897 rc = -EIO;
1898 goto cchunk_out;
1899 }
1900 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001901 cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001902 rc = -EIO;
1903 goto cchunk_out;
1904 }
1905 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001906
Sachin Prabhu620d8742017-02-10 16:03:51 +05301907 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1908 src_off += bytes_written;
1909 dest_off += bytes_written;
1910 len -= bytes_written;
1911 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001912
Sachin Prabhu620d8742017-02-10 16:03:51 +05301913 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001914 le32_to_cpu(retbuf->ChunksWritten),
1915 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301916 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001917 } else if (rc == -EINVAL) {
1918 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1919 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001920
Steve French9bf0c9c2013-11-16 18:05:28 -06001921 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1922 le32_to_cpu(retbuf->ChunksWritten),
1923 le32_to_cpu(retbuf->ChunkBytesWritten),
1924 le32_to_cpu(retbuf->TotalBytesWritten));
1925
1926 /*
1927 * Check if this is the first request using these sizes,
1928 * (ie check if copy succeed once with original sizes
1929 * and check if the server gave us different sizes after
1930 * we already updated max sizes on previous request).
1931 * if not then why is the server returning an error now
1932 */
1933 if ((chunks_copied != 0) || chunk_sizes_updated)
1934 goto cchunk_out;
1935
1936 /* Check that server is not asking us to grow size */
1937 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1938 tcon->max_bytes_chunk)
1939 tcon->max_bytes_chunk =
1940 le32_to_cpu(retbuf->ChunkBytesWritten);
1941 else
1942 goto cchunk_out; /* server gave us bogus size */
1943
1944 /* No need to change MaxChunks since already set to 1 */
1945 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001946 } else
1947 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001948 }
1949
1950cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001951 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001952 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301953 if (rc)
1954 return rc;
1955 else
1956 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001957}
1958
/* Thin wrapper: flush any cached server-side data for the open handle. */
static int
smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1965
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001966static unsigned int
1967smb2_read_data_offset(char *buf)
1968{
1969 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001970
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001971 return rsp->DataOffset;
1972}
1973
1974static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001975smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001976{
1977 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001978
1979 if (in_remaining)
1980 return le32_to_cpu(rsp->DataRemaining);
1981
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001982 return le32_to_cpu(rsp->DataLength);
1983}
1984
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001985
/*
 * Fill in the handle ids from the open file and forward to SMB2_read.
 * Thin adapter used by the protocol-neutral read path.
 */
static int
smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
	       struct cifs_io_parms *parms, unsigned int *bytes_read,
	       char **buf, int *buf_type)
{
	parms->persistent_fid = pfid->persistent_fid;
	parms->volatile_fid = pfid->volatile_fid;
	return SMB2_read(xid, parms, bytes_read, buf, buf_type);
}
1995
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001996static int
Steve Frenchdb8b6312014-09-22 05:13:55 -05001997smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001998 struct cifs_io_parms *parms, unsigned int *written,
1999 struct kvec *iov, unsigned long nr_segs)
2000{
2001
Steve Frenchdb8b6312014-09-22 05:13:55 -05002002 parms->persistent_fid = pfid->persistent_fid;
2003 parms->volatile_fid = pfid->volatile_fid;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002004 return SMB2_write(xid, parms, written, iov, nr_segs);
2005}
2006
Steve Frenchd43cc792014-08-13 17:16:29 -05002007/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
2008static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
2009 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
2010{
2011 struct cifsInodeInfo *cifsi;
2012 int rc;
2013
2014 cifsi = CIFS_I(inode);
2015
2016 /* if file already sparse don't bother setting sparse again */
2017 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
2018 return true; /* already sparse */
2019
2020 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
2021 return true; /* already not sparse */
2022
2023 /*
2024 * Can't check for sparse support on share the usual way via the
2025 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
2026 * since Samba server doesn't set the flag on the share, yet
2027 * supports the set sparse FSCTL and returns sparse correctly
2028 * in the file attributes. If we fail setting sparse though we
2029 * mark that server does not support sparse files for this share
2030 * to avoid repeatedly sending the unsupported fsctl to server
2031 * if the file is repeatedly extended.
2032 */
2033 if (tcon->broken_sparse_sup)
2034 return false;
2035
2036 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2037 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002038 true /* is_fctl */,
Steve French153322f2019-03-28 22:32:49 -05002039 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05002040 if (rc) {
2041 tcon->broken_sparse_sup = true;
2042 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
2043 return false;
2044 }
2045
2046 if (setsparse)
2047 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
2048 else
2049 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
2050
2051 return true;
2052}
2053
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002054static int
2055smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
2056 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
2057{
2058 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05002059 struct inode *inode;
2060
2061 /*
2062 * If extending file more than one page make sparse. Many Linux fs
2063 * make files sparse by default when extending via ftruncate
2064 */
David Howells2b0143b2015-03-17 22:25:59 +00002065 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05002066
2067 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05002068 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05002069
Steve Frenchd43cc792014-08-13 17:16:29 -05002070 /* whether set sparse succeeds or not, extend the file */
2071 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05002072 }
2073
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002074 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10002075 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002076}
2077
Steve French02b16662015-06-27 21:18:36 -07002078static int
2079smb2_duplicate_extents(const unsigned int xid,
2080 struct cifsFileInfo *srcfile,
2081 struct cifsFileInfo *trgtfile, u64 src_off,
2082 u64 len, u64 dest_off)
2083{
2084 int rc;
2085 unsigned int ret_data_len;
Steve Frenchcfc63fc2021-03-26 18:41:55 -05002086 struct inode *inode;
Steve French02b16662015-06-27 21:18:36 -07002087 struct duplicate_extents_to_file dup_ext_buf;
2088 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
2089
2090 /* server fileays advertise duplicate extent support with this flag */
2091 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
2092 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
2093 return -EOPNOTSUPP;
2094
2095 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
2096 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
2097 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
2098 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
2099 dup_ext_buf.ByteCount = cpu_to_le64(len);
Christoph Probsta205d502019-05-08 21:36:25 +02002100 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
Steve French02b16662015-06-27 21:18:36 -07002101 src_off, dest_off, len);
2102
Steve Frenchcfc63fc2021-03-26 18:41:55 -05002103 inode = d_inode(trgtfile->dentry);
2104 if (inode->i_size < dest_off + len) {
2105 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
2106 if (rc)
2107 goto duplicate_extents_out;
Steve French02b16662015-06-27 21:18:36 -07002108
Steve Frenchcfc63fc2021-03-26 18:41:55 -05002109 /*
2110 * Although also could set plausible allocation size (i_blocks)
2111 * here in addition to setting the file size, in reflink
2112 * it is likely that the target file is sparse. Its allocation
2113 * size will be queried on next revalidate, but it is important
2114 * to make sure that file's cached size is updated immediately
2115 */
2116 cifs_setsize(inode, dest_off + len);
2117 }
Steve French02b16662015-06-27 21:18:36 -07002118 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
2119 trgtfile->fid.volatile_fid,
2120 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002121 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01002122 (char *)&dup_ext_buf,
Steve French02b16662015-06-27 21:18:36 -07002123 sizeof(struct duplicate_extents_to_file),
Steve French153322f2019-03-28 22:32:49 -05002124 CIFSMaxBufSize, NULL,
Steve French02b16662015-06-27 21:18:36 -07002125 &ret_data_len);
2126
2127 if (ret_data_len > 0)
Christoph Probsta205d502019-05-08 21:36:25 +02002128 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
Steve French02b16662015-06-27 21:18:36 -07002129
2130duplicate_extents_out:
2131 return rc;
2132}
Steve French02b16662015-06-27 21:18:36 -07002133
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002134static int
Steve French64a5cfa2013-10-14 15:31:32 -05002135smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2136 struct cifsFileInfo *cfile)
2137{
2138 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
2139 cfile->fid.volatile_fid);
2140}
2141
2142static int
Steve Frenchb3152e22015-06-24 03:17:02 -05002143smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2144 struct cifsFileInfo *cfile)
2145{
2146 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05002147 unsigned int ret_data_len;
2148
2149 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
2150 integr_info.Flags = 0;
2151 integr_info.Reserved = 0;
2152
2153 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2154 cfile->fid.volatile_fid,
2155 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002156 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01002157 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05002158 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05002159 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05002160 &ret_data_len);
2161
2162}
2163
Steve Frenche02789a2018-08-09 14:33:12 -05002164/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2165#define GMT_TOKEN_SIZE 50
2166
Steve French153322f2019-03-28 22:32:49 -05002167#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2168
Steve Frenche02789a2018-08-09 14:33:12 -05002169/*
2170 * Input buffer contains (empty) struct smb_snapshot array with size filled in
2171 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2172 */
Steve Frenchb3152e22015-06-24 03:17:02 -05002173static int
Steve French834170c2016-09-30 21:14:26 -05002174smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2175 struct cifsFileInfo *cfile, void __user *ioc_buf)
2176{
2177 char *retbuf = NULL;
2178 unsigned int ret_data_len = 0;
2179 int rc;
Steve French153322f2019-03-28 22:32:49 -05002180 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05002181 struct smb_snapshot_array snapshot_in;
2182
Steve French973189a2019-04-04 00:41:04 -05002183 /*
2184 * On the first query to enumerate the list of snapshots available
2185 * for this volume the buffer begins with 0 (number of snapshots
2186 * which can be returned is zero since at that point we do not know
2187 * how big the buffer needs to be). On the second query,
2188 * it (ret_data_len) is set to number of snapshots so we can
2189 * know to set the maximum response size larger (see below).
2190 */
Steve French153322f2019-03-28 22:32:49 -05002191 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
2192 return -EFAULT;
2193
2194 /*
2195 * Note that for snapshot queries that servers like Azure expect that
2196 * the first query be minimal size (and just used to get the number/size
2197 * of previous versions) so response size must be specified as EXACTLY
2198 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
2199 * of eight bytes.
2200 */
2201 if (ret_data_len == 0)
2202 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
2203 else
2204 max_response_size = CIFSMaxBufSize;
2205
Steve French834170c2016-09-30 21:14:26 -05002206 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2207 cfile->fid.volatile_fid,
2208 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002209 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002210 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05002211 (char **)&retbuf,
2212 &ret_data_len);
2213 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
2214 rc, ret_data_len);
2215 if (rc)
2216 return rc;
2217
2218 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
2219 /* Fixup buffer */
2220 if (copy_from_user(&snapshot_in, ioc_buf,
2221 sizeof(struct smb_snapshot_array))) {
2222 rc = -EFAULT;
2223 kfree(retbuf);
2224 return rc;
2225 }
Steve French834170c2016-09-30 21:14:26 -05002226
Steve Frenche02789a2018-08-09 14:33:12 -05002227 /*
2228 * Check for min size, ie not large enough to fit even one GMT
2229 * token (snapshot). On the first ioctl some users may pass in
2230 * smaller size (or zero) to simply get the size of the array
2231 * so the user space caller can allocate sufficient memory
2232 * and retry the ioctl again with larger array size sufficient
2233 * to hold all of the snapshot GMT tokens on the second try.
2234 */
2235 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2236 ret_data_len = sizeof(struct smb_snapshot_array);
2237
2238 /*
2239 * We return struct SRV_SNAPSHOT_ARRAY, followed by
2240 * the snapshot array (of 50 byte GMT tokens) each
2241 * representing an available previous version of the data
2242 */
2243 if (ret_data_len > (snapshot_in.snapshot_array_size +
2244 sizeof(struct smb_snapshot_array)))
2245 ret_data_len = snapshot_in.snapshot_array_size +
2246 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05002247
2248 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2249 rc = -EFAULT;
2250 }
2251
2252 kfree(retbuf);
2253 return rc;
2254}
2255
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002256
2257
/*
 * Implement the SMB3 change-notify ioctl: open the path backing @pfile,
 * issue SMB2_change_notify with the user-supplied watch flags, then close
 * the handle again.
 *
 * @xid:	transaction id
 * @pfile:	file whose dentry/path identifies the watched object
 * @ioc_buf:	user pointer to a struct smb3_notify (watch_tree and
 *		completion_filter are read from it)
 *
 * Returns 0 on success or a negative errno; all exit paths funnel
 * through notify_exit so the dentry-path page and the UTF-16 path are
 * always freed.
 */
static int
smb3_notify(const unsigned int xid, struct file *pfile,
	    void __user *ioc_buf)
{
	struct smb3_notify notify;
	struct dentry *dentry = pfile->f_path.dentry;
	struct inode *inode = file_inode(pfile);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct cifs_tcon *tcon;
	const unsigned char *path;
	void *page = alloc_dentry_path();
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int rc = 0;

	path = build_path_from_dentry(dentry, page);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		goto notify_exit;
	}

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (utf16_path == NULL) {
		rc = -ENOMEM;
		goto notify_exit;
	}

	if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
		rc = -EFAULT;
		goto notify_exit;
	}

	/* Open a fresh handle on the path rather than reusing pfile's. */
	tcon = cifs_sb_master_tcon(cifs_sb);
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;
	/*
	 * NOTE(review): oparms is only partially initialized here (only the
	 * fields listed above are set) — confirm SMB2_open does not read any
	 * other oparms members on this path.
	 */

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
		       NULL);
	if (rc)
		goto notify_exit;

	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
				notify.watch_tree, notify.completion_filter);

	/* Close unconditionally; rc keeps the change-notify result. */
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);

notify_exit:
	free_dentry_path(page);
	kfree(utf16_path);
	return rc;
}
2317
/*
 * Start a directory enumeration: send a compound SMB2 CREATE (open the
 * directory) + QUERY_DIRECTORY request, parse the first batch of entries
 * into @srch_inf, and return the open directory handle in @fid.
 *
 * @xid:		transaction id
 * @tcon:		tree connection for the share
 * @path:		directory path to enumerate
 * @cifs_sb:		superblock info (mount options, path conversion)
 * @fid:		out: persistent/volatile handle of the opened dir
 * @search_flags:	currently unused by this implementation
 * @srch_inf:		in: info_level to request; out: parsed entries,
 *			endOfSearch, entries_in_buffer
 *
 * Returns 0 on success (including the empty-directory case, where
 * endOfSearch is set) or a negative errno.
 */
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *path, struct cifs_sb_info *cifs_sb,
		     struct cifs_fid *fid, __u16 search_flags,
		     struct cifs_search_info *srch_inf)
{
	__le16 *utf16_path;
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	int resp_buftype[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	int rc, flags = 0;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct smb2_query_directory_rsp *qd_rsp = NULL;
	struct smb2_create_rsp *op_rsp = NULL;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
	int retry_count = 0;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qdf_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query directory */
	srch_inf->entries_in_buffer = 0;
	srch_inf->index_of_last_entry = 2;

	memset(&qd_iov, 0, sizeof(qd_iov));
	rqst[1].rq_iov = qd_iov;
	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	/* COMPOUND_FID: the query uses the fid produced by the open above. */
	rc = SMB2_query_directory_init(xid, tcon, server,
				       &rqst[1],
				       COMPOUND_FID, COMPOUND_FID,
				       0, srch_inf->info_level);
	if (rc)
		goto qdf_free;

	smb2_set_related(&rqst[1]);

again:
	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);

	/*
	 * NOTE(review): up to 10 retries on -EAGAIN; confirm
	 * compound_send_recv leaves resp_buftype/rsp_iov unallocated on
	 * -EAGAIN so the retry does not leak response buffers.
	 */
	if (rc == -EAGAIN && retry_count++ < 10)
		goto again;

	/* If the open failed there is nothing to do */
	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
		goto qdf_free;
	}
	fid->persistent_fid = le64_to_cpu(op_rsp->PersistentFileId);
	fid->volatile_fid = le64_to_cpu(op_rsp->VolatileFileId);

	/* Anything else than ENODATA means a genuine error */
	if (rc && rc != -ENODATA) {
		/* The open succeeded, so close the handle before bailing. */
		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
		trace_smb3_query_dir_err(xid, fid->persistent_fid,
					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}

	atomic_inc(&tcon->num_remote_opens);

	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
	if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) {
		/* Empty directory: success with endOfSearch set. */
		trace_smb3_query_dir_done(xid, fid->persistent_fid,
					  tcon->tid, tcon->ses->Suid, 0, 0);
		srch_inf->endOfSearch = true;
		rc = 0;
		goto qdf_free;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
					 tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}
	/* srch_inf now owns the response buffer; don't free it below. */
	resp_buftype[1] = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
				  tcon->ses->Suid, 0, srch_inf->entries_in_buffer);

 qdf_free:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_query_directory_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
2441
2442static int
2443smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2444 struct cifs_fid *fid, __u16 search_flags,
2445 struct cifs_search_info *srch_inf)
2446{
2447 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2448 fid->volatile_fid, 0, srch_inf);
2449}
2450
2451static int
2452smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2453 struct cifs_fid *fid)
2454{
2455 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2456}
2457
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002458/*
Christoph Probsta205d502019-05-08 21:36:25 +02002459 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2460 * the number of credits and return true. Otherwise - return false.
2461 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002462static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002463smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002464{
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002465 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
Shyam Prasad N6d82c272021-02-03 23:20:46 -08002466 int scredits, in_flight;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002467
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002468 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002469 return false;
2470
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002471 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002472 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002473 server->credits += le16_to_cpu(shdr->CreditRequest);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -08002474 scredits = server->credits;
Shyam Prasad N6d82c272021-02-03 23:20:46 -08002475 in_flight = server->in_flight;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002476 spin_unlock(&server->req_lock);
2477 wake_up(&server->request_q);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -08002478
2479 trace_smb3_add_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -08002480 server->conn_id, server->hostname, scredits,
2481 le16_to_cpu(shdr->CreditRequest), in_flight);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -08002482 cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
2483 __func__, le16_to_cpu(shdr->CreditRequest), scredits);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002484 }
2485
2486 return true;
2487}
2488
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002489static bool
2490smb2_is_session_expired(char *buf)
2491{
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002492 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002493
Mark Symsd81243c2018-05-24 09:47:31 +01002494 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2495 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002496 return false;
2497
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002498 trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId),
2499 le64_to_cpu(shdr->SessionId),
Steve Frenche68a9322018-07-30 14:23:58 -05002500 le16_to_cpu(shdr->Command),
2501 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002502 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002503
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002504 return true;
2505}
2506
Rohith Surabattula8e670f72020-09-18 05:37:28 +00002507static bool
2508smb2_is_status_io_timeout(char *buf)
2509{
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002510 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
Rohith Surabattula8e670f72020-09-18 05:37:28 +00002511
2512 if (shdr->Status == STATUS_IO_TIMEOUT)
2513 return true;
2514 else
2515 return false;
2516}
2517
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002518static void
2519smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
2520{
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002521 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002522 struct list_head *tmp, *tmp1;
2523 struct cifs_ses *ses;
2524 struct cifs_tcon *tcon;
2525
Steve Frenchf1a08652021-02-20 18:52:15 -06002526 if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
2527 return;
2528
2529 spin_lock(&cifs_tcp_ses_lock);
2530 list_for_each(tmp, &server->smb_ses_list) {
2531 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
2532 list_for_each(tmp1, &ses->tcon_list) {
2533 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002534 if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
Steve Frenchf1a08652021-02-20 18:52:15 -06002535 tcon->need_reconnect = true;
2536 spin_unlock(&cifs_tcp_ses_lock);
2537 pr_warn_once("Server share %s deleted.\n",
2538 tcon->treeName);
2539 return;
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002540 }
2541 }
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002542 }
Steve Frenchf1a08652021-02-20 18:52:15 -06002543 spin_unlock(&cifs_tcp_ses_lock);
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002544}
2545
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002546static int
2547smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2548 struct cifsInodeInfo *cinode)
2549{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002550 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2551 return SMB2_lease_break(0, tcon, cinode->lease_key,
2552 smb2_get_lease_state(cinode));
2553
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002554 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2555 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002556 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002557}
2558
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002559void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002560smb2_set_related(struct smb_rqst *rqst)
2561{
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002562 struct smb2_hdr *shdr;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002563
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09002564 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002565 if (shdr == NULL) {
2566 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2567 return;
2568 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002569 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2570}
2571
2572char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2573
/*
 * Prepare a compound sub-request to be followed by another: pad the
 * request out to an 8-byte boundary and record the padded length in the
 * header's NextCommand field.
 *
 * For unencrypted sessions the padding is appended as an extra iov
 * pointing at the shared smb2_padding buffer; for encrypted sessions all
 * iovs are flattened into rq_iov[0] (the encryption framework cannot
 * handle a separate padding iov) and the padding is written in place.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 */
		/*
		 * NOTE(review): this assumes rq_iov[0]'s buffer has room for
		 * the trailing iovs plus padding — confirm against the
		 * callers that build encrypted compounds.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2630
/*
 * Compound open/query-info/close on @utf16_path, returning the raw
 * QUERY_INFO response to the caller.
 *
 * Passes the query info response back to the caller on success.
 * Caller need to free this with free_rsp_buf().
 *
 * @xid:		transaction id
 * @tcon:		tree connection for the share
 * @utf16_path:		path to query (UTF-16, 0 = root of share)
 * @desired_access:	access mask for the transient open
 * @class/@type:	SMB2 info class and info type for the query
 * @output_len:		maximum response payload requested
 * @rsp:		out: kvec holding the query-info response
 * @buftype:		out: buffer type for free_rsp_buf()
 * @cifs_sb:		superblock info (create options)
 *
 * Returns 0 on success or a negative errno; on -EREMCHG the tcon is
 * flagged for reconnect (share was moved/deleted).
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Request 0: open the path. */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Request 1: query info on the fid from the open (COMPOUND_FID). */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Request 2: close the transient handle. */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->treeName);
		}
		goto qic_exit;
	}
	/* Hand the query-info response (index 1) to the caller. */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2727
/*
 * Fill in @buf (statfs) for the share by querying
 * FS_FULL_SIZE_INFORMATION on the root of the share via a compound
 * open/query/close.
 *
 * Returns 0 on success or a negative errno.
 */
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
	     struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
	struct smb2_query_info_rsp *rsp;
	struct smb2_fs_full_size_info *info = NULL;
	__le16 utf16_path = 0; /* Null - open root of share */
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	int rc;


	rc = smb2_query_info_compound(xid, tcon, &utf16_path,
				      FILE_READ_ATTRIBUTES,
				      FS_FULL_SIZE_INFORMATION,
				      SMB2_O_INFO_FILESYSTEM,
				      sizeof(struct smb2_fs_full_size_info),
				      &rsp_iov, &buftype, cifs_sb);
	if (rc)
		goto qfs_exit;

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	buf->f_type = SMB2_MAGIC_NUMBER;
	/* Info payload sits at OutputBufferOffset within the response. */
	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	/* Validate offset/length against the iov before copying out. */
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, buf);

qfs_exit:
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
2764
/*
 * statfs for SMB3.1.1 POSIX-extension mounts: open the root of the share
 * and use the POSIX query-fs-info call; fall back to smb2_queryfs() when
 * POSIX extensions were not negotiated.
 *
 * Returns 0 on success or a negative errno.
 */
static int
smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
	       struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	if (!tcon->posix_extensions)
		return smb2_queryfs(xid, tcon, cifs_sb, buf);

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
		       NULL, NULL);
	if (rc)
		return rc;

	rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
				   fid.volatile_fid, buf);
	buf->f_type = SMB2_MAGIC_NUMBER;
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	return rc;
}
Steve French2d304212018-06-24 23:28:12 -05002796
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002797static bool
2798smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2799{
2800 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2801 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2802}
2803
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002804static int
2805smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2806 __u64 length, __u32 type, int lock, int unlock, bool wait)
2807{
2808 if (unlock && !lock)
2809 type = SMB2_LOCKFLAG_UNLOCK;
2810 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2811 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2812 current->tgid, length, offset, type, wait);
2813}
2814
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002815static void
2816smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2817{
2818 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2819}
2820
2821static void
2822smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2823{
2824 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2825}
2826
/* Generate a fresh random lease key for a new open on this fid */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2832
/*
 * Resolve a DFS referral for @search_name on session @ses.
 *
 * Sends an FSCTL_DFS_GET_REFERRALS ioctl, preferring the session's IPC
 * tcon; if there is none, the first tcon on the session is borrowed and
 * reference-counted for the duration of the call.  On success the parsed
 * referral list is returned through @target_nodes / @num_of_nodes
 * (allocated by parse_dfs_referrals(); presumably freed by the caller —
 * verify against callers).  Returns 0 or a negative errno.
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;
	int retry_count = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		if (tcon)
			tcon->tc_count++;
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	/* Request buffer is the fixed header plus the UTF-16 path */
	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	/* Retry up to 5 times with a short sleep on retryable errors */
	do {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
		if (!is_retryable_error(rc))
			break;
		usleep_range(512, 2048);
	} while (++retry_count < 5);

	if (rc) {
		/* -ENOENT and -EOPNOTSUPP are expected here; don't log them */
		if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
			cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		/* tc_count can never go negative */
		WARN_ON(tcon->tc_count < 0);
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002933
2934static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002935parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2936 u32 plen, char **target_path,
2937 struct cifs_sb_info *cifs_sb)
2938{
2939 unsigned int len;
2940
2941 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2942 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2943
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002944 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2945 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2946 le64_to_cpu(symlink_buf->InodeType));
2947 return -EOPNOTSUPP;
2948 }
2949
2950 *target_path = cifs_strndup_from_utf16(
2951 symlink_buf->PathBuffer,
2952 len, true, cifs_sb->local_nls);
2953 if (!(*target_path))
2954 return -ENOMEM;
2955
2956 convert_delimiter(*target_path, '/');
2957 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2958
2959 return 0;
2960}
2961
2962static int
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002963parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
2964 u32 plen, char **target_path,
2965 struct cifs_sb_info *cifs_sb)
2966{
2967 unsigned int sub_len;
2968 unsigned int sub_offset;
2969
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002970 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002971
2972 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
2973 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
2974 if (sub_offset + 20 > plen ||
2975 sub_offset + sub_len + 20 > plen) {
2976 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
2977 return -EIO;
2978 }
2979
2980 *target_path = cifs_strndup_from_utf16(
2981 symlink_buf->PathBuffer + sub_offset,
2982 sub_len, true, cifs_sb->local_nls);
2983 if (!(*target_path))
2984 return -ENOMEM;
2985
2986 convert_delimiter(*target_path, '/');
2987 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2988
2989 return 0;
2990}
2991
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002992static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002993parse_reparse_point(struct reparse_data_buffer *buf,
2994 u32 plen, char **target_path,
2995 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002996{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002997 if (plen < sizeof(struct reparse_data_buffer)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002998 cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
2999 plen);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10003000 return -EIO;
3001 }
3002
3003 if (plen < le16_to_cpu(buf->ReparseDataLength) +
3004 sizeof(struct reparse_data_buffer)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07003005 cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
3006 plen);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10003007 return -EIO;
3008 }
3009
Steve Frenchd5ecebc2019-06-28 02:04:18 -05003010 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10003011 switch (le32_to_cpu(buf->ReparseTag)) {
3012 case IO_REPARSE_TAG_NFS:
3013 return parse_reparse_posix(
3014 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05003015 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10003016 case IO_REPARSE_TAG_SYMLINK:
3017 return parse_reparse_symlink(
3018 (struct reparse_symlink_data_buffer *)buf,
3019 plen, target_path, cifs_sb);
3020 default:
Joe Perchesa0a30362020-04-14 22:42:53 -07003021 cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
3022 le32_to_cpu(buf->ReparseTag));
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10003023 return -EOPNOTSUPP;
3024 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05003025}
3026
/*
 * Minimum on-the-wire size of an SMB2 error response carrying a symlink
 * error payload.  NOTE(review): the -1 presumably compensates for a
 * one-byte ErrorData placeholder inside smb2_err_rsp that the symlink
 * structure overlays — confirm against the smb2_err_rsp definition.
 */
#define SMB2_SYMLINK_STRUCT_SIZE \
	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
3029
/*
 * Read the target of the symlink at @full_path into *@target_path
 * (newly allocated; set to NULL on entry).
 *
 * A compound open / ioctl(FSCTL_GET_REPARSE_POINT) / close request is
 * sent on one round trip.  Two outcomes are handled:
 *  - the open succeeded and @is_reparse_point is set: the target is
 *    parsed out of the reparse data returned by the ioctl;
 *  - the open failed with a symlink error response: the substitute name
 *    is extracted from that error payload after bounds validation.
 * Any other outcome yields -ENOENT/-EINVAL.
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	/* Output budget leaves room for the create and close responses */
	rc = SMB2_ioctl_init(tcon, server,
			     &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* Preserve a failed create response for the symlink-error path below */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* Reparse payload must lie within the ioctl response */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				      plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	/* Validate the symlink error payload before trusting its fields */
	err_buf = err_iov.iov_base;
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	/* Both substitute and print names must fit inside the response */
	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	*target_path = cifs_strndup_from_utf16(
			(char *)symlink->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

 querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
3218
/*
 * Fetch the reparse tag of @full_path into *@tag.
 *
 * Uses a single compound round trip: open with OPEN_REPARSE_POINT,
 * ioctl(FSCTL_GET_REPARSE_POINT) on the compound fid, then close.
 * *@tag is only written when the whole compound succeeds.
 */
int
smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
		       struct cifs_sb_info *cifs_sb, const char *full_path,
		       __u32 *tag)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/*
	 * setup smb2open - TODO add optimization to call cifs_get_readable_path
	 * to see if there is a handle already open that we can use
	 */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto query_rp_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	/* COMPOUND_FID makes the ioctl act on the handle the open creates */
	rc = SMB2_ioctl_init(tcon, server,
			     &rqst[1], COMPOUND_FID,
			     COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto query_rp_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto query_rp_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);

	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if (rc == 0) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* Reparse payload must lie within the ioctl response */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
				      plen);
			rc = -EIO;
			goto query_rp_exit;
		}
		*tag = le32_to_cpu(reparse_buf->ReparseTag);
	}

 query_rp_exit:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
3345
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003346static struct cifs_ntsd *
3347get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003348 const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003349{
3350 struct cifs_ntsd *pntsd = NULL;
3351 unsigned int xid;
3352 int rc = -EOPNOTSUPP;
3353 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3354
3355 if (IS_ERR(tlink))
3356 return ERR_CAST(tlink);
3357
3358 xid = get_xid();
3359 cifs_dbg(FYI, "trying to get acl\n");
3360
3361 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003362 cifsfid->volatile_fid, (void **)&pntsd, pacllen,
3363 info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003364 free_xid(xid);
3365
3366 cifs_put_tlink(tlink);
3367
3368 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3369 if (rc)
3370 return ERR_PTR(rc);
3371 return pntsd;
3372
3373}
3374
/*
 * Retrieve the security descriptor for @path by opening it with
 * READ_CONTROL (plus SYSTEM_SECURITY when SACL info is requested),
 * querying the ACL on that handle, and closing it again.  Returns the
 * descriptor (length in *@pacllen) or an ERR_PTR on failure.
 */
static struct cifs_ntsd *
get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen, u32 info)
{
	struct cifs_ntsd *pntsd = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	unsigned int xid;
	int rc;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	__le16 *utf16_path;

	cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path) {
		rc = -ENOMEM;
		free_xid(xid);
		return ERR_PTR(rc);
	}

	oparms.tcon = tcon;
	oparms.desired_access = READ_CONTROL;
	oparms.disposition = FILE_OPEN;
	/*
	 * When querying an ACL, even if the file is a symlink we want to open
	 * the source not the target, and so the protocol requires that the
	 * client specify this flag when opening a reparse point
	 */
	oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/* Reading the SACL additionally requires SYSTEM_SECURITY access */
	if (info & SACL_SECINFO)
		oparms.desired_access |= SYSTEM_SECURITY;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
		       NULL);
	kfree(utf16_path);
	if (!rc) {
		rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
			    fid.volatile_fid, (void **)&pntsd, pacllen,
			    info);
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	}

	cifs_put_tlink(tlink);
	free_xid(xid);

	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
3436
/*
 * Write security descriptor @pnntsd (@acllen bytes) for @path.
 * @aclflag selects which parts (owner/group/DACL/SACL) are being set,
 * which in turn determines the access rights requested on the open.
 * Returns 0 or a negative errno.
 */
static int
set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
		struct inode *inode, const char *path, int aclflag)
{
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	unsigned int xid;
	int rc, access_flags = 0;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	__le16 *utf16_path;

	cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	/* Map the parts being set to the access rights the open needs */
	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
		access_flags |= WRITE_OWNER;
	if (aclflag & CIFS_ACL_SACL)
		access_flags |= SYSTEM_SECURITY;
	if (aclflag & CIFS_ACL_DACL)
		access_flags |= WRITE_DAC;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path) {
		rc = -ENOMEM;
		free_xid(xid);
		return rc;
	}

	oparms.tcon = tcon;
	oparms.desired_access = access_flags;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.disposition = FILE_OPEN;
	oparms.path = path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
		       NULL, NULL);
	kfree(utf16_path);
	if (!rc) {
		rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
			    fid.volatile_fid, pnntsd, acllen, aclflag);
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	}

	cifs_put_tlink(tlink);
	free_xid(xid);
	return rc;
}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003493
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003494/* Retrieve an ACL from the server */
3495static struct cifs_ntsd *
3496get_smb2_acl(struct cifs_sb_info *cifs_sb,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003497 struct inode *inode, const char *path,
3498 u32 *pacllen, u32 info)
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003499{
3500 struct cifs_ntsd *pntsd = NULL;
3501 struct cifsFileInfo *open_file = NULL;
3502
Boris Protopopov9541b812020-12-17 20:58:08 +00003503 if (inode && !(info & SACL_SECINFO))
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003504 open_file = find_readable_file(CIFS_I(inode), true);
Boris Protopopov9541b812020-12-17 20:58:08 +00003505 if (!open_file || (info & SACL_SECINFO))
Boris Protopopov3970acf2020-12-18 11:30:12 -06003506 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003507
Boris Protopopov3970acf2020-12-18 11:30:12 -06003508 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003509 cifsFileInfo_put(open_file);
3510 return pntsd;
3511}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003512
/*
 * Zero @len bytes of the file at @offset via FSCTL_SET_ZERO_DATA.
 * Unless @keep_size is set, the file's EOF is extended to cover the
 * zeroed range.  The page cache for the range is dropped first so
 * cached pages cannot disagree with the server afterwards.
 * Returns 0 or a negative errno.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);

	/*
	 * We zero the range through ioctl, so we need remove the page caches
	 * first, otherwise the data may be inconsistent with the server.
	 */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* [FileOffset, BeyondFinalZero) is the range the server zeroes */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
3581
/*
 * Emulate FALLOC_FL_PUNCH_HOLE for SMB3: deallocate [offset, offset + len)
 * while leaving the file size unchanged.  Done with FSCTL_SET_ZERO_DATA,
 * which requires the file to be marked sparse first.
 * Returns 0 on success or a negative errno.
 */
static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len)
{
	struct inode *inode;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__u8 set_sparse = 1;

	xid = get_xid();

	inode = d_inode(cfile->dentry);

	/* Need to make file sparse, if not already, before freeing range. */
	/* Consider adding equivalent for compressed since it could also work */
	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	/*
	 * Hold the invalidate lock over both the cache purge and the ioctl so
	 * a racing page fault cannot repopulate the cache with pre-punch data.
	 */
	filemap_invalidate_lock(inode->i_mapping);
	/*
	 * We implement the punch hole through ioctl, so we need remove the page
	 * caches first, otherwise the data may be inconsistent with the server.
	 */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* BeyondFinalZero is exclusive: zeroes [FileOffset, BeyondFinalZero) */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
			true /* is_fctl */, (char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			CIFSMaxBufSize, NULL, NULL);
	free_xid(xid);
	filemap_invalidate_unlock(inode->i_mapping);
	return rc;
}
3625
Ronnie Sahlberg966a3cb2021-06-03 15:31:01 +10003626static int smb3_simple_fallocate_write_range(unsigned int xid,
3627 struct cifs_tcon *tcon,
3628 struct cifsFileInfo *cfile,
3629 loff_t off, loff_t len,
3630 char *buf)
3631{
3632 struct cifs_io_parms io_parms = {0};
Steve French5ad4df52021-07-26 16:22:55 -05003633 int nbytes;
3634 int rc = 0;
Ronnie Sahlberg966a3cb2021-06-03 15:31:01 +10003635 struct kvec iov[2];
3636
3637 io_parms.netfid = cfile->fid.netfid;
3638 io_parms.pid = current->tgid;
3639 io_parms.tcon = tcon;
3640 io_parms.persistent_fid = cfile->fid.persistent_fid;
3641 io_parms.volatile_fid = cfile->fid.volatile_fid;
Ronnie Sahlberg966a3cb2021-06-03 15:31:01 +10003642
Ronnie Sahlberg2485bd72021-07-22 14:53:32 +10003643 while (len) {
3644 io_parms.offset = off;
3645 io_parms.length = len;
3646 if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
3647 io_parms.length = SMB2_MAX_BUFFER_SIZE;
3648 /* iov[0] is reserved for smb header */
3649 iov[1].iov_base = buf;
3650 iov[1].iov_len = io_parms.length;
3651 rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
3652 if (rc)
3653 break;
3654 if (nbytes > len)
3655 return -EINVAL;
3656 buf += nbytes;
3657 off += nbytes;
3658 len -= nbytes;
3659 }
3660 return rc;
Ronnie Sahlberg966a3cb2021-06-03 15:31:01 +10003661}
3662
/*
 * Emulate fallocate for an interior region of a sparse file by writing
 * zeroes into every hole inside [off, off + len), leaving ranges that are
 * already allocated untouched.  The current layout is discovered with
 * FSCTL_QUERY_ALLOCATED_RANGES (up to 1024 extents per query).
 * Returns 0 on success or a negative errno.
 */
static int smb3_simple_fallocate_range(unsigned int xid,
				       struct cifs_tcon *tcon,
				       struct cifsFileInfo *cfile,
				       loff_t off, loff_t len)
{
	struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
	u32 out_data_len;
	char *buf = NULL;	/* zero-filled scratch buffer for the writes */
	loff_t l;
	int rc;

	in_data.file_offset = cpu_to_le64(off);
	in_data.length = cpu_to_le64(len);
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			1024 * sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc)
		goto out;

	buf = kzalloc(1024 * 1024, GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	tmp_data = out_data;
	while (len) {
		/*
		 * The rest of the region is unmapped so write it all.
		 */
		if (out_data_len == 0) {
			rc = smb3_simple_fallocate_write_range(xid, tcon,
					       cfile, off, len, buf);
			goto out;
		}

		/* a short/truncated extent record means a malformed reply */
		if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
			rc = -EINVAL;
			goto out;
		}

		if (off < le64_to_cpu(tmp_data->file_offset)) {
			/*
			 * We are at a hole. Write until the end of the region
			 * or until the next allocated data,
			 * whichever comes next.
			 */
			l = le64_to_cpu(tmp_data->file_offset) - off;
			if (len < l)
				l = len;
			rc = smb3_simple_fallocate_write_range(xid, tcon,
					       cfile, off, l, buf);
			if (rc)
				goto out;
			off = off + l;
			len = len - l;
			if (len == 0)
				goto out;
		}
		/*
		 * We are at a section of allocated data, just skip forward
		 * until the end of the data or the end of the region
		 * we are supposed to fallocate, whichever comes first.
		 */
		l = le64_to_cpu(tmp_data->length);
		if (len < l)
			l = len;
		off += l;
		len -= l;

		/* advance to the next extent record returned by the server */
		tmp_data = &tmp_data[1];
		out_data_len -= sizeof(struct file_allocated_range_buffer);
	}

 out:
	kfree(out_data);
	kfree(buf);
	return rc;
}
3745
3746
/*
 * Emulate fallocate with mode == 0 or FALLOC_FL_KEEP_SIZE over SMB3
 * (which has no preallocate command):
 *  - extending requests grow EOF via SMB2_set_eof;
 *  - interior regions of a sparse file are either zero-written (small
 *    regions) or, when virtually the whole file is covered, satisfied by
 *    clearing the sparse attribute.
 * Returns 0 on success or a negative errno (-EOPNOTSUPP when the request
 * cannot be emulated safely).
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			    loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Extending the file
	 */
	if ((keep_size == false) && i_size_read(inode) < off + len) {
		rc = inode_newsize_ok(inode, off + len);
		if (rc)
			goto out;

		/* extending implies allocating; clear sparse if it was set */
		if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
			smb2_set_sparse(xid, tcon, cfile, inode, false);

		eof = cpu_to_le64(off + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
		if (rc == 0) {
			/* keep the cached size in sync with the server */
			cifsi->server_eof = off + len;
			cifs_setsize(inode, off + len);
			cifs_truncate_page(inode->i_mapping, inode->i_size);
			truncate_setsize(inode, off + len);
		}
		goto out;
	}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and since we are not
	 * extending then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		rc = 0;
		goto out;
	}

	if (keep_size == true) {
		/*
		 * We can not preallocate pages beyond the end of the file
		 * in SMB2
		 */
		if (off >= i_size_read(inode)) {
			rc = 0;
			goto out;
		}
		/*
		 * For fallocates that are partially beyond the end of file,
		 * clamp len so we only fallocate up to the end of file.
		 */
		if (off + len > i_size_read(inode)) {
			len = i_size_read(inode) - off;
		}
	}

	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * At this point, we are trying to fallocate an internal
		 * region of a sparse file. Since smb2 does not have a
		 * fallocate command we have two options on how to emulate this.
		 * We can either turn the entire file to become non-sparse
		 * which we only do if the fallocate is for virtually
		 * the whole file, or we can overwrite the region with zeroes
		 * using SMB2_write, which could be prohibitively expensive
		 * if len is large.
		 */
		/*
		 * We are only trying to fallocate a small region so
		 * just write it with zero.
		 */
		if (len <= 1024 * 1024) {
			rc = smb3_simple_fallocate_range(xid, tcon, cfile,
							 off, len);
			goto out;
		}

		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	/* large region covering (almost) the whole file: unset sparse */
	smb2_set_sparse(xid, tcon, cfile, inode, false);
	rc = 0;

out:
	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3873
Ronnie Sahlberg5476b5d2021-03-27 05:52:29 +10003874static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
3875 loff_t off, loff_t len)
3876{
3877 int rc;
3878 unsigned int xid;
3879 struct cifsFileInfo *cfile = file->private_data;
3880 __le64 eof;
3881
3882 xid = get_xid();
3883
3884 if (off >= i_size_read(file->f_inode) ||
3885 off + len >= i_size_read(file->f_inode)) {
3886 rc = -EINVAL;
3887 goto out;
3888 }
3889
3890 rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
3891 i_size_read(file->f_inode) - off - len, off);
3892 if (rc < 0)
3893 goto out;
3894
3895 eof = cpu_to_le64(i_size_read(file->f_inode) - len);
3896 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3897 cfile->fid.volatile_fid, cfile->pid, &eof);
3898 if (rc < 0)
3899 goto out;
3900
3901 rc = 0;
3902 out:
3903 free_xid(xid);
3904 return rc;
3905}
3906
Ronnie Sahlberg7fe6fe92021-03-27 06:31:30 +10003907static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
3908 loff_t off, loff_t len)
3909{
3910 int rc;
3911 unsigned int xid;
3912 struct cifsFileInfo *cfile = file->private_data;
3913 __le64 eof;
3914 __u64 count;
3915
3916 xid = get_xid();
3917
3918 if (off >= i_size_read(file->f_inode)) {
3919 rc = -EINVAL;
3920 goto out;
3921 }
3922
3923 count = i_size_read(file->f_inode) - off;
3924 eof = cpu_to_le64(i_size_read(file->f_inode) + len);
3925
3926 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3927 cfile->fid.volatile_fid, cfile->pid, &eof);
3928 if (rc < 0)
3929 goto out;
3930
3931 rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
3932 if (rc < 0)
3933 goto out;
3934
3935 rc = smb3_zero_range(file, tcon, off, len, 1);
3936 if (rc < 0)
3937 goto out;
3938
3939 rc = 0;
3940 out:
3941 free_xid(xid);
3942 return rc;
3943}
3944
/*
 * Implement SEEK_HOLE/SEEK_DATA for SMB3 by asking the server for the
 * allocated ranges at/after offset (FSCTL_QUERY_ALLOCATED_RANGES).
 * Other whence values fall through to generic_file_llseek().
 * Returns the new file position, or a negative errno (-ENXIO when offset
 * is past EOF or no data follows it).
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* non-sparse file: everything up to EOF is data, the hole is at EOF */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	/*
	 * Ask for a single extent record; -E2BIG just means there were more
	 * ranges than fit, which is fine since we only need the first one.
	 */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG)
		rc = 0;
	if (rc)
		goto lseek_exit;

	/* no allocated ranges after offset: offset is already in a hole */
	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	/* no allocated ranges after offset: there is no data to seek to */
	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		/* first allocated range at/after offset is the data */
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: offset before the first extent is already in a hole */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	/* otherwise the hole starts right after the first extent */
	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
4028
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004029static int smb3_fiemap(struct cifs_tcon *tcon,
4030 struct cifsFileInfo *cfile,
4031 struct fiemap_extent_info *fei, u64 start, u64 len)
4032{
4033 unsigned int xid;
4034 struct file_allocated_range_buffer in_data, *out_data;
4035 u32 out_data_len;
4036 int i, num, rc, flags, last_blob;
4037 u64 next;
4038
Christoph Hellwig45dd0522020-05-23 09:30:14 +02004039 rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
Christoph Hellwigcddf8a22020-05-23 09:30:13 +02004040 if (rc)
4041 return rc;
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004042
4043 xid = get_xid();
4044 again:
4045 in_data.file_offset = cpu_to_le64(start);
4046 in_data.length = cpu_to_le64(len);
4047
4048 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
4049 cfile->fid.volatile_fid,
4050 FSCTL_QUERY_ALLOCATED_RANGES, true,
4051 (char *)&in_data, sizeof(in_data),
4052 1024 * sizeof(struct file_allocated_range_buffer),
4053 (char **)&out_data, &out_data_len);
4054 if (rc == -E2BIG) {
4055 last_blob = 0;
4056 rc = 0;
4057 } else
4058 last_blob = 1;
4059 if (rc)
4060 goto out;
4061
Murphy Zhou979a2662020-03-14 11:38:31 +08004062 if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10004063 rc = -EINVAL;
4064 goto out;
4065 }
4066 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
4067 rc = -EINVAL;
4068 goto out;
4069 }
4070
4071 num = out_data_len / sizeof(struct file_allocated_range_buffer);
4072 for (i = 0; i < num; i++) {
4073 flags = 0;
4074 if (i == num - 1 && last_blob)
4075 flags |= FIEMAP_EXTENT_LAST;
4076
4077 rc = fiemap_fill_next_extent(fei,
4078 le64_to_cpu(out_data[i].file_offset),
4079 le64_to_cpu(out_data[i].file_offset),
4080 le64_to_cpu(out_data[i].length),
4081 flags);
4082 if (rc < 0)
4083 goto out;
4084 if (rc == 1) {
4085 rc = 0;
4086 goto out;
4087 }
4088 }
4089
4090 if (!last_blob) {
4091 next = le64_to_cpu(out_data[num - 1].file_offset) +
4092 le64_to_cpu(out_data[num - 1].length);
4093 len = len - (next - start);
4094 start = next;
4095 goto again;
4096 }
4097
4098 out:
4099 free_xid(xid);
4100 kfree(out_data);
4101 return rc;
4102}
Steve French9ccf3212014-10-18 17:01:15 -05004103
Steve French31742c52014-08-17 08:38:47 -05004104static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
4105 loff_t off, loff_t len)
4106{
4107 /* KEEP_SIZE already checked for by do_fallocate */
4108 if (mode & FALLOC_FL_PUNCH_HOLE)
4109 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05004110 else if (mode & FALLOC_FL_ZERO_RANGE) {
4111 if (mode & FALLOC_FL_KEEP_SIZE)
4112 return smb3_zero_range(file, tcon, off, len, true);
4113 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05004114 } else if (mode == FALLOC_FL_KEEP_SIZE)
4115 return smb3_simple_falloc(file, tcon, off, len, true);
Ronnie Sahlberg5476b5d2021-03-27 05:52:29 +10004116 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
4117 return smb3_collapse_range(file, tcon, off, len);
Ronnie Sahlberg7fe6fe92021-03-27 06:31:30 +10004118 else if (mode == FALLOC_FL_INSERT_RANGE)
4119 return smb3_insert_range(file, tcon, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05004120 else if (mode == 0)
4121 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05004122
4123 return -EOPNOTSUPP;
4124}
4125
/*
 * SMB2.0 oplock downgrade: this dialect has no lease epochs, so simply
 * apply the new oplock level (epoch and purge_cache are unused here).
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
4133
4134static void
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004135smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4136 unsigned int epoch, bool *purge_cache);
4137
4138static void
4139smb3_downgrade_oplock(struct TCP_Server_Info *server,
4140 struct cifsInodeInfo *cinode, __u32 oplock,
4141 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004142{
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004143 unsigned int old_state = cinode->oplock;
4144 unsigned int old_epoch = cinode->epoch;
4145 unsigned int new_state;
4146
4147 if (epoch > old_epoch) {
4148 smb21_set_oplock_level(cinode, oplock, 0, NULL);
4149 cinode->epoch = epoch;
4150 }
4151
4152 new_state = cinode->oplock;
4153 *purge_cache = false;
4154
4155 if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
4156 (new_state & CIFS_CACHE_READ_FLG) == 0)
4157 *purge_cache = true;
4158 else if (old_state == new_state && (epoch - old_epoch > 1))
4159 *purge_cache = true;
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08004160}
4161
4162static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004163smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4164 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004165{
4166 oplock &= 0xFF;
Rohith Surabattula0ab95c22021-05-17 11:28:34 +00004167 cinode->lease_granted = false;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004168 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4169 return;
4170 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004171 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004172 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
4173 &cinode->vfs_inode);
4174 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004175 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004176 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
4177 &cinode->vfs_inode);
4178 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
4179 cinode->oplock = CIFS_CACHE_READ_FLG;
4180 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
4181 &cinode->vfs_inode);
4182 } else
4183 cinode->oplock = 0;
4184}
4185
4186static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004187smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4188 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004189{
4190 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004191 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004192
4193 oplock &= 0xFF;
Rohith Surabattula0ab95c22021-05-17 11:28:34 +00004194 cinode->lease_granted = true;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004195 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4196 return;
4197
Pavel Shilovskya016e272019-09-26 12:31:20 -07004198 /* Check if the server granted an oplock rather than a lease */
4199 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4200 return smb2_set_oplock_level(cinode, oplock, epoch,
4201 purge_cache);
4202
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004203 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004204 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004205 strcat(message, "R");
4206 }
4207 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004208 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004209 strcat(message, "H");
4210 }
4211 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004212 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004213 strcat(message, "W");
4214 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004215 if (!new_oplock)
4216 strncpy(message, "None", sizeof(message));
4217
4218 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004219 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
4220 &cinode->vfs_inode);
4221}
4222
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004223static void
4224smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4225 unsigned int epoch, bool *purge_cache)
4226{
4227 unsigned int old_oplock = cinode->oplock;
4228
4229 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
4230
4231 if (purge_cache) {
4232 *purge_cache = false;
4233 if (old_oplock == CIFS_CACHE_READ_FLG) {
4234 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
4235 (epoch - cinode->epoch > 0))
4236 *purge_cache = true;
4237 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
4238 (epoch - cinode->epoch > 1))
4239 *purge_cache = true;
4240 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
4241 (epoch - cinode->epoch > 1))
4242 *purge_cache = true;
4243 else if (cinode->oplock == 0 &&
4244 (epoch - cinode->epoch > 0))
4245 *purge_cache = true;
4246 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
4247 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
4248 (epoch - cinode->epoch > 0))
4249 *purge_cache = true;
4250 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
4251 (epoch - cinode->epoch > 1))
4252 *purge_cache = true;
4253 }
4254 cinode->epoch = epoch;
4255 }
4256}
4257
/*
 * SMB2.0 has no leases: the only oplock level that is read-only
 * (no write/handle caching) is level II.
 */
static bool
smb2_is_read_op(__u32 oplock)
{
	return oplock == SMB2_OPLOCK_LEVEL_II;
}
4263
4264static bool
4265smb21_is_read_op(__u32 oplock)
4266{
4267 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
4268 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
4269}
4270
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004271static __le32
4272map_oplock_to_lease(u8 oplock)
4273{
4274 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4275 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
4276 else if (oplock == SMB2_OPLOCK_LEVEL_II)
4277 return SMB2_LEASE_READ_CACHING;
4278 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
4279 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
4280 SMB2_LEASE_WRITE_CACHING;
4281 return 0;
4282}
4283
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004284static char *
4285smb2_create_lease_buf(u8 *lease_key, u8 oplock)
4286{
4287 struct create_lease *buf;
4288
4289 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
4290 if (!buf)
4291 return NULL;
4292
Stefano Brivio729c0c92018-07-05 15:10:02 +02004293 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004294 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004295
4296 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4297 (struct create_lease, lcontext));
4298 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
4299 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4300 (struct create_lease, Name));
4301 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07004302 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004303 buf->Name[0] = 'R';
4304 buf->Name[1] = 'q';
4305 buf->Name[2] = 'L';
4306 buf->Name[3] = 's';
4307 return (char *)buf;
4308}
4309
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004310static char *
4311smb3_create_lease_buf(u8 *lease_key, u8 oplock)
4312{
4313 struct create_lease_v2 *buf;
4314
4315 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
4316 if (!buf)
4317 return NULL;
4318
Stefano Brivio729c0c92018-07-05 15:10:02 +02004319 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004320 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4321
4322 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4323 (struct create_lease_v2, lcontext));
4324 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
4325 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4326 (struct create_lease_v2, Name));
4327 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07004328 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004329 buf->Name[0] = 'R';
4330 buf->Name[1] = 'q';
4331 buf->Name[2] = 'L';
4332 buf->Name[3] = 's';
4333 return (char *)buf;
4334}
4335
/*
 * Extract the granted lease state from an SMB2.0/2.1 create-lease reply
 * context.  The v1 context carries no epoch (reported as 0) and the lease
 * key is not copied out here.  Returns SMB2_OPLOCK_LEVEL_NOCHANGE if a
 * lease break is already in progress.
 */
static __u8
smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
{
	struct create_lease *lc = (struct create_lease *)buf;

	*epoch = 0; /* not used */
	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
		return SMB2_OPLOCK_LEVEL_NOCHANGE;
	return le32_to_cpu(lc->lcontext.LeaseState);
}
4346
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004347static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06004348smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004349{
4350 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
4351
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004352 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004353 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
4354 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06004355 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02004356 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004357 return le32_to_cpu(lc->lcontext.LeaseState);
4358}
4359
/*
 * Size to use when retrying a failed writepages request: the mount's
 * configured wsize, capped at the SMB2 maximum buffer size.
 */
static unsigned int
smb2_wp_retry_size(struct inode *inode)
{
	return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
		     SMB2_MAX_BUFFER_SIZE);
}
4366
/*
 * A directory handle needs an SMB2 close only if it is still valid on the
 * server (an invalidated handle was already closed/lost by reconnect).
 */
static bool
smb2_dir_needs_close(struct cifsFileInfo *cfile)
{
	return !cfile->invalidHandle;
}
4372
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004373static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004374fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05004375 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004376{
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09004377 struct smb2_hdr *shdr =
4378 (struct smb2_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004379
4380 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
4381 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
4382 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
4383 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French63ca5652020-10-15 23:41:40 -05004384 if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4385 (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
Steve Frenchfd08f2d2020-10-15 00:25:02 -05004386 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
Steve French2b2f7542019-06-07 15:16:10 -05004387 else
Steve Frenchfd08f2d2020-10-15 00:25:02 -05004388 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004389 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004390}
4391
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	void *addr;
	/*
	 * VMAP_STACK (at least) puts stack into the vmalloc address space
	 */
	if (is_vmalloc_addr(buf))
		addr = vmalloc_to_page(buf);
	else
		addr = virt_to_page(buf);
	/*
	 * NOTE(review): only the page containing buf is mapped here; this
	 * assumes buf + buflen does not cross a page boundary for vmalloc'd
	 * (e.g. VMAP_STACK) addresses — verify callers keep such buffers
	 * within a single page.
	 */
	sg_set_page(sg, addr, buflen, offset_in_page(buf));
}
4408
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004409/* Assumes the first rqst has a transform header as the first iov.
4410 * I.e.
4411 * rqst[0].rq_iov[0] is transform header
4412 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
4413 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004414 */
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004415static struct scatterlist *
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004416init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004417{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004418 unsigned int sg_len;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004419 struct scatterlist *sg;
4420 unsigned int i;
4421 unsigned int j;
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004422 unsigned int idx = 0;
4423 int skip;
4424
4425 sg_len = 1;
4426 for (i = 0; i < num_rqst; i++)
4427 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004428
4429 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
4430 if (!sg)
4431 return NULL;
4432
4433 sg_init_table(sg, sg_len);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004434 for (i = 0; i < num_rqst; i++) {
4435 for (j = 0; j < rqst[i].rq_nvec; j++) {
4436 /*
4437 * The first rqst has a transform header where the
4438 * first 20 bytes are not part of the encrypted blob
4439 */
4440 skip = (i == 0) && (j == 0) ? 20 : 0;
4441 smb2_sg_set_buf(&sg[idx++],
4442 rqst[i].rq_iov[j].iov_base + skip,
4443 rqst[i].rq_iov[j].iov_len - skip);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10004444 }
Steve Frenchd5f07fb2018-06-05 17:46:24 -05004445
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004446 for (j = 0; j < rqst[i].rq_npages; j++) {
4447 unsigned int len, offset;
4448
4449 rqst_page_get_length(&rqst[i], j, &len, &offset);
4450 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
4451 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004452 }
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004453 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004454 return sg;
4455}
4456
/*
 * Look up the SMB3 en/decryption key for the session matching @ses_id.
 *
 * Note: the @server parameter is deliberately reused as the list cursor,
 * so all known TCP connections are searched, not only the one passed in.
 *
 * @enc: non-zero selects the encryption key, zero the decryption key.
 *
 * On success copies SMB3_ENC_DEC_KEY_SIZE bytes into @key and returns 0;
 * returns -EAGAIN when no session with @ses_id exists.
 */
static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{
	struct cifs_ses *ses;
	u8 *ses_enc_key;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			if (ses->Suid == ses_id) {
				ses_enc_key = enc ? ses->smb3encryptionkey :
					ses->smb3decryptionkey;
				memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
				spin_unlock(&cifs_tcp_ses_lock);
				return 0;
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	return -EAGAIN;
}
/*
 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
 * iov[0] - transform header (associate data),
 * iov[1-N] - SMB2 header and pages - data to encrypt.
 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
 * untouched.
 *
 * @enc: non-zero to encrypt, zero to decrypt (in place via the sg list).
 */
static int
crypt_message(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int enc)
{
	struct smb2_transform_hdr *tr_hdr =
		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
	/*
	 * Everything in the transform header except its first 20 bytes is
	 * authenticated as associated data (matches the skip in init_sg()).
	 */
	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
	int rc = 0;
	struct scatterlist *sg;
	u8 sign[SMB2_SIGNATURE_SIZE] = {};
	u8 key[SMB3_ENC_DEC_KEY_SIZE];
	struct aead_request *req;
	char *iv;
	unsigned int iv_len;
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);

	/* the key is selected by the session id in the transform header */
	rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
			 enc ? "en" : "de");
		return rc;
	}

	/* no-op if the AEAD tfms were already allocated for this server */
	rc = smb3_crypto_aead_allocate(server);
	if (rc) {
		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
		return rc;
	}

	tfm = enc ? server->secmech.ccmaesencrypt :
						server->secmech.ccmaesdecrypt;

	/* 256-bit ciphers take the full key; 128-bit ciphers the first half */
	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
	else
		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);

	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
		return rc;
	}

	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
		return rc;
	}

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
		return -ENOMEM;
	}

	if (!enc) {
		/* for decryption, verify against the signature in the header */
		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
		crypt_len += SMB2_SIGNATURE_SIZE;
	}

	/* one sg list covering all iovs + pages + the signature buffer */
	sg = init_sg(num_rqst, rqst, sign);
	if (!sg) {
		cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
		rc = -ENOMEM;
		goto free_req;
	}

	iv_len = crypto_aead_ivsize(tfm);
	iv = kzalloc(iv_len, GFP_KERNEL);
	if (!iv) {
		cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
		rc = -ENOMEM;
		goto free_sg;
	}

	/* build the IV from the transform header nonce; CCM needs the
	 * flag byte 3 prepended, GCM uses the nonce as-is */
	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
	else {
		iv[0] = 3;
		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
	}

	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
	aead_request_set_ad(req, assoc_data_len);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* synchronously wait for the (possibly async) AEAD operation */
	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
				: crypto_aead_decrypt(req), &wait);

	/* on encryption, publish the computed signature in the header */
	if (!rc && enc)
		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);

	kfree(iv);
free_sg:
	kfree(sg);
free_req:
	kfree(req);
	return rc;
}
4590
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004591void
4592smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004593{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004594 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004595
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004596 for (i = 0; i < num_rqst; i++) {
4597 if (rqst[i].rq_pages) {
4598 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
4599 put_page(rqst[i].rq_pages[j]);
4600 kfree(rqst[i].rq_pages);
4601 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004602 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004603}
4604
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004605/*
4606 * This function will initialize new_rq and encrypt the content.
4607 * The first entry, new_rq[0], only contains a single iov which contains
4608 * a smb2_transform_hdr and is pre-allocated by the caller.
4609 * This function then populates new_rq[1+] with the content from olq_rq[0+].
4610 *
4611 * The end result is an array of smb_rqst structures where the first structure
4612 * only contains a single iov for the transform header which we then can pass
4613 * to crypt_message().
4614 *
4615 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
4616 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
4617 */
4618static int
4619smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
4620 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004621{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004622 struct page **pages;
4623 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
4624 unsigned int npages;
4625 unsigned int orig_len = 0;
4626 int i, j;
4627 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004628
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004629 for (i = 1; i < num_rqst; i++) {
4630 npages = old_rq[i - 1].rq_npages;
4631 pages = kmalloc_array(npages, sizeof(struct page *),
4632 GFP_KERNEL);
4633 if (!pages)
4634 goto err_free;
4635
4636 new_rq[i].rq_pages = pages;
4637 new_rq[i].rq_npages = npages;
4638 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
4639 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
4640 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
4641 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
4642 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
4643
4644 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
4645
4646 for (j = 0; j < npages; j++) {
4647 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4648 if (!pages[j])
4649 goto err_free;
4650 }
4651
4652 /* copy pages form the old */
4653 for (j = 0; j < npages; j++) {
4654 char *dst, *src;
4655 unsigned int offset, len;
4656
4657 rqst_page_get_length(&new_rq[i], j, &len, &offset);
4658
4659 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
4660 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
4661
4662 memcpy(dst, src, len);
4663 kunmap(new_rq[i].rq_pages[j]);
4664 kunmap(old_rq[i - 1].rq_pages[j]);
4665 }
4666 }
4667
4668 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05004669 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004670
4671 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02004672 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004673 if (rc)
4674 goto err_free;
4675
4676 return rc;
4677
4678err_free:
4679 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
4680 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004681}
4682
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004683static int
4684smb3_is_transform_hdr(void *buf)
4685{
4686 struct smb2_transform_hdr *trhdr = buf;
4687
4688 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4689}
4690
4691static int
4692decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4693 unsigned int buf_data_size, struct page **pages,
Rohith Surabattula62593012020-10-08 09:58:41 +00004694 unsigned int npages, unsigned int page_data_size,
4695 bool is_offloaded)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004696{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004697 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004698 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004699 int rc;
4700
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004701 iov[0].iov_base = buf;
4702 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4703 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4704 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004705
4706 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004707 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004708 rqst.rq_pages = pages;
4709 rqst.rq_npages = npages;
4710 rqst.rq_pagesz = PAGE_SIZE;
4711 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
4712
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004713 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02004714 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004715
4716 if (rc)
4717 return rc;
4718
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004719 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004720
Rohith Surabattula62593012020-10-08 09:58:41 +00004721 if (!is_offloaded)
4722 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004723
4724 return rc;
4725}
4726
4727static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004728read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
4729 unsigned int npages, unsigned int len)
4730{
4731 int i;
4732 int length;
4733
4734 for (i = 0; i < npages; i++) {
4735 struct page *page = pages[i];
4736 size_t n;
4737
4738 n = len;
4739 if (len >= PAGE_SIZE) {
4740 /* enough data to fill the page */
4741 n = PAGE_SIZE;
4742 len -= n;
4743 } else {
4744 zero_user(page, len, PAGE_SIZE - len);
4745 len = 0;
4746 }
Long Li1dbe3462018-05-30 12:47:55 -07004747 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004748 if (length < 0)
4749 return length;
4750 server->total_read += length;
4751 }
4752
4753 return 0;
4754}
4755
4756static int
4757init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
4758 unsigned int cur_off, struct bio_vec **page_vec)
4759{
4760 struct bio_vec *bvec;
4761 int i;
4762
4763 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
4764 if (!bvec)
4765 return -ENOMEM;
4766
4767 for (i = 0; i < npages; i++) {
4768 bvec[i].bv_page = pages[i];
4769 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
4770 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
4771 data_size -= bvec[i].bv_len;
4772 }
4773
4774 if (data_size != 0) {
4775 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4776 kfree(bvec);
4777 return -EIO;
4778 }
4779
4780 *page_vec = bvec;
4781 return 0;
4782}
4783
/*
 * Parse an SMB2 READ response (already decrypted if it was encrypted) and
 * hand its payload to the readdata copy callback.
 *
 * @buf/@buf_len: response header buffer; @pages/@npages/@page_data_size:
 * payload received into pages rather than into @buf.
 * @is_offloaded: true when running on a decrypt worker thread; the mid
 * state is then set directly instead of going through dequeue_mid().
 *
 * Returns 0 when the frame was consumed (including protocol errors, which
 * are reported via rdata->result), a negative value on fatal errors, or
 * the value returned by rdata->copy_into_pages() on success.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size,
		 bool is_offloaded)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		/* reconnect is driven by the demultiplex thread only */
		if (!is_offloaded)
			cifs_reconnect(server);
		return -1;
	}

	if (server->ops->is_status_pending &&
			server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_RECEIVED;
		else
			dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	/* validate the server-supplied data offset before trusting it */
	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* padding between the response header and the payload */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	if (is_offloaded)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		dequeue_mid(mid, false);
	return length;
}
4939
/*
 * Work item carrying everything a worker thread needs to decrypt a large
 * read response off the demultiplex thread (see smb2_decrypt_offload()).
 */
struct smb2_decrypt_work {
	struct work_struct decrypt;
	struct TCP_Server_Info *server;
	struct page **ppages;		/* pages holding the encrypted payload */
	char *buf;			/* response buffer (taken over from server->smallbuf) */
	unsigned int npages;		/* number of entries in ppages */
	unsigned int len;		/* payload length held in ppages */
};
4948
4949
/*
 * Workqueue handler: decrypt a large read response that was handed off by
 * receive_encrypted_read(), then locate its mid and complete it.  Owns and
 * frees the pages, the response buffer and the work item itself.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len, true);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	dw->server->lstrp = jiffies;	/* record time of this server response */
	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len,
				      true);
		if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
			mid->when_received = jiffies;
#endif
			if (dw->server->ops->is_network_name_deleted)
				dw->server->ops->is_network_name_deleted(dw->buf,
									 dw->server);

			mid->callback(mid);
		} else {
			/*
			 * Processing failed: either retry the mid (on
			 * reconnect) or put it back on the pending queue
			 * so it is handled by the regular path.
			 */
			spin_lock(&GlobalMid_Lock);
			if (dw->server->tcpStatus == CifsNeedReconnect) {
				mid->mid_state = MID_RETRY_NEEDED;
				spin_unlock(&GlobalMid_Lock);
				mid->callback(mid);
			} else {
				mid->mid_state = MID_REQUEST_SUBMITTED;
				mid->mid_flags &= ~(MID_DELETED);
				list_add_tail(&mid->qhead,
					&dw->server->pending_mid_q);
				spin_unlock(&GlobalMid_Lock);
			}
		}
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
5008
5009
/*
 * Receive and decrypt an encrypted read response whose payload does not
 * fit in the response buffer.  Reads the payload into freshly allocated
 * pages, then either queues the decryption to a worker thread (large
 * responses, when min_offload is configured) or decrypts inline and
 * resolves the mid here.
 *
 * Returns the handle_read_data() result, a negative error, or -1 with
 * *num_mids == 0 when ownership was transferred to the worker thread.
 */
static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
		       int *num_mids)
{
	char *buf = server->smallbuf;
	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
	unsigned int npages;
	struct page **pages;
	unsigned int len;
	unsigned int buflen = server->pdu_size;
	int rc;
	int i = 0;	/* count of allocated pages; used by the cleanup loop */
	struct smb2_decrypt_work *dw;

	*num_mids = 1;
	/* finish reading the transform header + read response header */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;

	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
	if (rc < 0)
		return rc;
	server->total_read += rc;

	/* remaining payload length, per the transform header */
	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
		server->vals->read_rsp_size;
	npages = DIV_ROUND_UP(len, PAGE_SIZE);

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto discard_data;
	}

	for (; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			rc = -ENOMEM;
			goto discard_data;
		}
	}

	/* read read data into pages */
	rc = read_data_into_pages(server, pages, npages, len);
	if (rc)
		goto free_pages;

	rc = cifs_discard_remaining_data(server);
	if (rc)
		goto free_pages;

	/*
	 * For large reads, offload to different thread for better performance,
	 * use more cores decrypting which can be expensive
	 */

	if ((server->min_offload) && (server->in_flight > 1) &&
	    (server->pdu_size >= server->min_offload)) {
		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
		if (dw == NULL)
			goto non_offloaded_decrypt;

		/* hand the response buffer to the worker; replace smallbuf */
		dw->buf = server->smallbuf;
		server->smallbuf = (char *)cifs_small_buf_get();

		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);

		dw->npages = npages;
		dw->server = server;
		dw->ppages = pages;
		dw->len = len;
		queue_work(decrypt_wq, &dw->decrypt);
		*num_mids = 0; /* worker thread takes care of finding mid */
		return -1;
	}

non_offloaded_decrypt:
	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
			      pages, npages, len, false);
	if (rc)
		goto free_pages;

	*mid = smb2_find_mid(server, buf);
	if (*mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		(*mid)->decrypted = true;
		rc = handle_read_data(server, *mid, buf,
				      server->vals->read_rsp_size,
				      pages, npages, len, false);
		if (rc >= 0) {
			if (server->ops->is_network_name_deleted) {
				server->ops->is_network_name_deleted(buf,
								     server);
			}
		}
	}

free_pages:
	/* i pages were successfully allocated above */
	for (i = i - 1; i >= 0; i--)
		put_page(pages[i]);
	kfree(pages);
	return rc;
discard_data:
	cifs_discard_remaining_data(server);
	goto free_pages;
}
5117
5118static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005119receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005120 struct mid_q_entry **mids, char **bufs,
5121 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005122{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005123 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005124 char *buf = server->smallbuf;
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005125 struct smb2_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10005126 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005127 unsigned int buf_size;
5128 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005129 int next_is_large;
5130 char *next_buffer = NULL;
5131
5132 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005133
5134 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005135 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005136 server->large_buf = true;
5137 memcpy(server->bigbuf, buf, server->total_read);
5138 buf = server->bigbuf;
5139 }
5140
5141 /* now read the rest */
5142 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005143 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005144 if (length < 0)
5145 return length;
5146 server->total_read += length;
5147
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005148 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Rohith Surabattula62593012020-10-08 09:58:41 +00005149 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005150 if (length)
5151 return length;
5152
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005153 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005154one_more:
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005155 shdr = (struct smb2_hdr *)buf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005156 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005157 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005158 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005159 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005160 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005161 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005162 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005163 pdu_length - le32_to_cpu(shdr->NextCommand));
5164 }
5165
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005166 mid_entry = smb2_find_mid(server, buf);
5167 if (mid_entry == NULL)
5168 cifs_dbg(FYI, "mid not found\n");
5169 else {
5170 cifs_dbg(FYI, "mid found\n");
5171 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005172 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005173 }
5174
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005175 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10005176 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005177 return -1;
5178 }
5179 bufs[*num_mids] = buf;
5180 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005181
5182 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005183 ret = mid_entry->handle(server, mid_entry);
5184 else
5185 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005186
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005187 if (ret == 0 && shdr->NextCommand) {
5188 pdu_length -= le32_to_cpu(shdr->NextCommand);
5189 server->large_buf = next_is_large;
5190 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005191 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005192 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005193 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005194 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005195 } else if (ret != 0) {
5196 /*
5197 * ret != 0 here means that we didn't get to handle_mid() thus
5198 * server->smallbuf and server->bigbuf are still valid. We need
5199 * to free next_buffer because it is not going to be used
5200 * anywhere.
5201 */
5202 if (next_is_large)
5203 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
5204 else
5205 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005206 }
5207
5208 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005209}
5210
5211static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005212smb3_receive_transform(struct TCP_Server_Info *server,
5213 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005214{
5215 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10005216 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005217 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
5218 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
5219
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005220 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005221 sizeof(struct smb2_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10005222 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005223 pdu_length);
5224 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005225 return -ECONNABORTED;
5226 }
5227
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005228 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10005229 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005230 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005231 return -ECONNABORTED;
5232 }
5233
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005234 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08005235 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05005236 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08005237 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005238
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005239 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005240}
5241
5242int
5243smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
5244{
5245 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
5246
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005247 return handle_read_data(server, mid, buf, server->pdu_size,
Rohith Surabattulade9ac0a2020-10-28 13:42:21 +00005248 NULL, 0, 0, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005249}
5250
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005251static int
5252smb2_next_header(char *buf)
5253{
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005254 struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005255 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
5256
5257 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
5258 return sizeof(struct smb2_transform_hdr) +
5259 le32_to_cpu(t_hdr->OriginalMessageSize);
5260
5261 return le32_to_cpu(hdr->NextCommand);
5262}
5263
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005264static int
5265smb2_make_node(unsigned int xid, struct inode *inode,
5266 struct dentry *dentry, struct cifs_tcon *tcon,
Al Viro55869132021-03-18 01:38:53 -04005267 const char *full_path, umode_t mode, dev_t dev)
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005268{
5269 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
5270 int rc = -EPERM;
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005271 FILE_ALL_INFO *buf = NULL;
Aurelien Aptel7c065142020-06-04 17:23:55 +02005272 struct cifs_io_parms io_parms = {0};
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005273 __u32 oplock = 0;
5274 struct cifs_fid fid;
5275 struct cifs_open_parms oparms;
5276 unsigned int bytes_written;
5277 struct win_dev *pdev;
5278 struct kvec iov[2];
5279
5280 /*
5281 * Check if mounted with mount parm 'sfu' mount parm.
5282 * SFU emulation should work with all servers, but only
5283 * supports block and char device (no socket & fifo),
5284 * and was used by default in earlier versions of Windows
5285 */
5286 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
5287 goto out;
5288
5289 /*
5290 * TODO: Add ability to create instead via reparse point. Windows (e.g.
5291 * their current NFS server) uses this approach to expose special files
5292 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
5293 */
5294
5295 if (!S_ISCHR(mode) && !S_ISBLK(mode))
5296 goto out;
5297
5298 cifs_dbg(FYI, "sfu compat create special file\n");
5299
5300 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
5301 if (buf == NULL) {
5302 rc = -ENOMEM;
5303 goto out;
5304 }
5305
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005306 oparms.tcon = tcon;
5307 oparms.cifs_sb = cifs_sb;
5308 oparms.desired_access = GENERIC_WRITE;
Amir Goldstein0f060932020-02-03 21:46:43 +02005309 oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
5310 CREATE_OPTION_SPECIAL);
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005311 oparms.disposition = FILE_CREATE;
5312 oparms.path = full_path;
5313 oparms.fid = &fid;
5314 oparms.reconnect = false;
5315
5316 if (tcon->ses->server->oplocks)
5317 oplock = REQ_OPLOCK;
5318 else
5319 oplock = 0;
5320 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
5321 if (rc)
5322 goto out;
5323
5324 /*
5325 * BB Do not bother to decode buf since no local inode yet to put
5326 * timestamps in, but we can reuse it safely.
5327 */
5328
5329 pdev = (struct win_dev *)buf;
5330 io_parms.pid = current->tgid;
5331 io_parms.tcon = tcon;
5332 io_parms.offset = 0;
5333 io_parms.length = sizeof(struct win_dev);
5334 iov[1].iov_base = buf;
5335 iov[1].iov_len = sizeof(struct win_dev);
5336 if (S_ISCHR(mode)) {
5337 memcpy(pdev->type, "IntxCHR", 8);
5338 pdev->major = cpu_to_le64(MAJOR(dev));
5339 pdev->minor = cpu_to_le64(MINOR(dev));
5340 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
5341 &bytes_written, iov, 1);
5342 } else if (S_ISBLK(mode)) {
5343 memcpy(pdev->type, "IntxBLK", 8);
5344 pdev->major = cpu_to_le64(MAJOR(dev));
5345 pdev->minor = cpu_to_le64(MINOR(dev));
5346 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
5347 &bytes_written, iov, 1);
5348 }
5349 tcon->ses->server->ops->close(xid, tcon, &fid);
5350 d_drop(dentry);
5351
5352 /* FIXME: add code here to set EAs */
5353out:
5354 kfree(buf);
5355 return rc;
5356}
5357
5358
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005359struct smb_version_operations smb20_operations = {
5360 .compare_fids = smb2_compare_fids,
5361 .setup_request = smb2_setup_request,
5362 .setup_async_request = smb2_setup_async_request,
5363 .check_receive = smb2_check_receive,
5364 .add_credits = smb2_add_credits,
5365 .set_credits = smb2_set_credits,
5366 .get_credits_field = smb2_get_credits_field,
5367 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04005368 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005369 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005370 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005371 .read_data_offset = smb2_read_data_offset,
5372 .read_data_length = smb2_read_data_length,
5373 .map_error = map_smb2_to_linux_error,
5374 .find_mid = smb2_find_mid,
5375 .check_message = smb2_check_message,
5376 .dump_detail = smb2_dump_detail,
5377 .clear_stats = smb2_clear_stats,
5378 .print_stats = smb2_print_stats,
5379 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005380 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00005381 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005382 .need_neg = smb2_need_neg,
5383 .negotiate = smb2_negotiate,
5384 .negotiate_wsize = smb2_negotiate_wsize,
5385 .negotiate_rsize = smb2_negotiate_rsize,
5386 .sess_setup = SMB2_sess_setup,
5387 .logoff = SMB2_logoff,
5388 .tree_connect = SMB2_tcon,
5389 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05005390 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005391 .is_path_accessible = smb2_is_path_accessible,
5392 .can_echo = smb2_can_echo,
5393 .echo = SMB2_echo,
5394 .query_path_info = smb2_query_path_info,
5395 .get_srv_inum = smb2_get_srv_inum,
5396 .query_file_info = smb2_query_file_info,
5397 .set_path_size = smb2_set_path_size,
5398 .set_file_size = smb2_set_file_size,
5399 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05005400 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005401 .mkdir = smb2_mkdir,
5402 .mkdir_setinfo = smb2_mkdir_setinfo,
5403 .rmdir = smb2_rmdir,
5404 .unlink = smb2_unlink,
5405 .rename = smb2_rename_path,
5406 .create_hardlink = smb2_create_hardlink,
5407 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01005408 .query_mf_symlink = smb3_query_mf_symlink,
5409 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005410 .open = smb2_open_file,
5411 .set_fid = smb2_set_fid,
5412 .close = smb2_close_file,
5413 .flush = smb2_flush_file,
5414 .async_readv = smb2_async_readv,
5415 .async_writev = smb2_async_writev,
5416 .sync_read = smb2_sync_read,
5417 .sync_write = smb2_sync_write,
5418 .query_dir_first = smb2_query_dir_first,
5419 .query_dir_next = smb2_query_dir_next,
5420 .close_dir = smb2_close_dir,
5421 .calc_smb_size = smb2_calc_size,
5422 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005423 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005424 .oplock_response = smb2_oplock_response,
5425 .queryfs = smb2_queryfs,
5426 .mand_lock = smb2_mand_lock,
5427 .mand_unlock_range = smb2_unlock_range,
5428 .push_mand_locks = smb2_push_mandatory_locks,
5429 .get_lease_key = smb2_get_lease_key,
5430 .set_lease_key = smb2_set_lease_key,
5431 .new_lease_key = smb2_new_lease_key,
5432 .calc_signature = smb2_calc_signature,
5433 .is_read_op = smb2_is_read_op,
5434 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005435 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04005436 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005437 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04005438 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04005439 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005440 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305441 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005442#ifdef CONFIG_CIFS_XATTR
5443 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005444 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005445#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05005446 .get_acl = get_smb2_acl,
5447 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05005448 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005449 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005450 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005451 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005452 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005453 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005454 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005455 .is_network_name_deleted = smb2_is_network_name_deleted,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005456};
5457
Steve French1080ef72011-02-24 18:07:19 +00005458struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07005459 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005460 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04005461 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005462 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04005463 .add_credits = smb2_add_credits,
5464 .set_credits = smb2_set_credits,
5465 .get_credits_field = smb2_get_credits_field,
5466 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04005467 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08005468 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005469 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005470 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07005471 .read_data_offset = smb2_read_data_offset,
5472 .read_data_length = smb2_read_data_length,
5473 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04005474 .find_mid = smb2_find_mid,
5475 .check_message = smb2_check_message,
5476 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04005477 .clear_stats = smb2_clear_stats,
5478 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005479 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005480 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07005481 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04005482 .need_neg = smb2_need_neg,
5483 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07005484 .negotiate_wsize = smb2_negotiate_wsize,
5485 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04005486 .sess_setup = SMB2_sess_setup,
5487 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04005488 .tree_connect = SMB2_tcon,
5489 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05005490 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04005491 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04005492 .can_echo = smb2_can_echo,
5493 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04005494 .query_path_info = smb2_query_path_info,
5495 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07005496 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07005497 .set_path_size = smb2_set_path_size,
5498 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07005499 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05005500 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04005501 .mkdir = smb2_mkdir,
5502 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04005503 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07005504 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07005505 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07005506 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04005507 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05005508 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05005509 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07005510 .open = smb2_open_file,
5511 .set_fid = smb2_set_fid,
5512 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07005513 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07005514 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07005515 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07005516 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07005517 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07005518 .query_dir_first = smb2_query_dir_first,
5519 .query_dir_next = smb2_query_dir_next,
5520 .close_dir = smb2_close_dir,
5521 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07005522 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005523 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005524 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005525 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005526 .mand_lock = smb2_mand_lock,
5527 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07005528 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07005529 .get_lease_key = smb2_get_lease_key,
5530 .set_lease_key = smb2_set_lease_key,
5531 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06005532 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005533 .is_read_op = smb21_is_read_op,
5534 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005535 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04005536 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005537 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04005538 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04005539 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05005540 .enum_snapshots = smb3_enum_snapshots,
Steve French2c6251a2020-02-12 22:37:08 -06005541 .notify = smb3_notify,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005542 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305543 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005544#ifdef CONFIG_CIFS_XATTR
5545 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005546 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005547#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05005548 .get_acl = get_smb2_acl,
5549 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05005550 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005551 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005552 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005553 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005554 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005555 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005556 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005557 .is_network_name_deleted = smb2_is_network_name_deleted,
Steve French38107d42012-12-08 22:08:06 -06005558};
5559
Steve French38107d42012-12-08 22:08:06 -06005560struct smb_version_operations smb30_operations = {
5561 .compare_fids = smb2_compare_fids,
5562 .setup_request = smb2_setup_request,
5563 .setup_async_request = smb2_setup_async_request,
5564 .check_receive = smb2_check_receive,
5565 .add_credits = smb2_add_credits,
5566 .set_credits = smb2_set_credits,
5567 .get_credits_field = smb2_get_credits_field,
5568 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04005569 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08005570 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06005571 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005572 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06005573 .read_data_offset = smb2_read_data_offset,
5574 .read_data_length = smb2_read_data_length,
5575 .map_error = map_smb2_to_linux_error,
5576 .find_mid = smb2_find_mid,
5577 .check_message = smb2_check_message,
5578 .dump_detail = smb2_dump_detail,
5579 .clear_stats = smb2_clear_stats,
5580 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05005581 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06005582 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005583 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07005584 .downgrade_oplock = smb3_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06005585 .need_neg = smb2_need_neg,
5586 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05005587 .negotiate_wsize = smb3_negotiate_wsize,
5588 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06005589 .sess_setup = SMB2_sess_setup,
5590 .logoff = SMB2_logoff,
5591 .tree_connect = SMB2_tcon,
5592 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05005593 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06005594 .is_path_accessible = smb2_is_path_accessible,
5595 .can_echo = smb2_can_echo,
5596 .echo = SMB2_echo,
5597 .query_path_info = smb2_query_path_info,
Steve French2e4564b2020-10-22 22:03:14 -05005598 /* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
5599 .query_reparse_tag = smb2_query_reparse_tag,
Steve French38107d42012-12-08 22:08:06 -06005600 .get_srv_inum = smb2_get_srv_inum,
5601 .query_file_info = smb2_query_file_info,
5602 .set_path_size = smb2_set_path_size,
5603 .set_file_size = smb2_set_file_size,
5604 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05005605 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06005606 .mkdir = smb2_mkdir,
5607 .mkdir_setinfo = smb2_mkdir_setinfo,
5608 .rmdir = smb2_rmdir,
5609 .unlink = smb2_unlink,
5610 .rename = smb2_rename_path,
5611 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04005612 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05005613 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05005614 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06005615 .open = smb2_open_file,
5616 .set_fid = smb2_set_fid,
5617 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06005618 .close_getattr = smb2_close_getattr,
Steve French38107d42012-12-08 22:08:06 -06005619 .flush = smb2_flush_file,
5620 .async_readv = smb2_async_readv,
5621 .async_writev = smb2_async_writev,
5622 .sync_read = smb2_sync_read,
5623 .sync_write = smb2_sync_write,
5624 .query_dir_first = smb2_query_dir_first,
5625 .query_dir_next = smb2_query_dir_next,
5626 .close_dir = smb2_close_dir,
5627 .calc_smb_size = smb2_calc_size,
5628 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005629 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06005630 .oplock_response = smb2_oplock_response,
5631 .queryfs = smb2_queryfs,
5632 .mand_lock = smb2_mand_lock,
5633 .mand_unlock_range = smb2_unlock_range,
5634 .push_mand_locks = smb2_push_mandatory_locks,
5635 .get_lease_key = smb2_get_lease_key,
5636 .set_lease_key = smb2_set_lease_key,
5637 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06005638 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06005639 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05005640 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005641 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04005642 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04005643 .create_lease_buf = smb3_create_lease_buf,
5644 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005645 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05005646 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06005647 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04005648 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04005649 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05005650 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05005651 .enum_snapshots = smb3_enum_snapshots,
Steve Frenchd26c2dd2020-02-06 06:00:14 -06005652 .notify = smb3_notify,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07005653 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005654 .is_transform_hdr = smb3_is_transform_hdr,
5655 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005656 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305657 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005658#ifdef CONFIG_CIFS_XATTR
5659 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005660 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005661#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05005662 .get_acl = get_smb2_acl,
5663 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05005664 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005665 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005666 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005667 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005668 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005669 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005670 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005671 .is_network_name_deleted = smb2_is_network_name_deleted,
Steve French1080ef72011-02-24 18:07:19 +00005672};
5673
Steve Frenchaab18932015-06-23 23:37:11 -05005674struct smb_version_operations smb311_operations = {
5675 .compare_fids = smb2_compare_fids,
5676 .setup_request = smb2_setup_request,
5677 .setup_async_request = smb2_setup_async_request,
5678 .check_receive = smb2_check_receive,
5679 .add_credits = smb2_add_credits,
5680 .set_credits = smb2_set_credits,
5681 .get_credits_field = smb2_get_credits_field,
5682 .get_credits = smb2_get_credits,
5683 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08005684 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05005685 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005686 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05005687 .read_data_offset = smb2_read_data_offset,
5688 .read_data_length = smb2_read_data_length,
5689 .map_error = map_smb2_to_linux_error,
5690 .find_mid = smb2_find_mid,
5691 .check_message = smb2_check_message,
5692 .dump_detail = smb2_dump_detail,
5693 .clear_stats = smb2_clear_stats,
5694 .print_stats = smb2_print_stats,
5695 .dump_share_caps = smb2_dump_share_caps,
5696 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005697 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07005698 .downgrade_oplock = smb3_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05005699 .need_neg = smb2_need_neg,
5700 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05005701 .negotiate_wsize = smb3_negotiate_wsize,
5702 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05005703 .sess_setup = SMB2_sess_setup,
5704 .logoff = SMB2_logoff,
5705 .tree_connect = SMB2_tcon,
5706 .tree_disconnect = SMB2_tdis,
5707 .qfs_tcon = smb3_qfs_tcon,
5708 .is_path_accessible = smb2_is_path_accessible,
5709 .can_echo = smb2_can_echo,
5710 .echo = SMB2_echo,
5711 .query_path_info = smb2_query_path_info,
Steve French2e4564b2020-10-22 22:03:14 -05005712 .query_reparse_tag = smb2_query_reparse_tag,
Steve Frenchaab18932015-06-23 23:37:11 -05005713 .get_srv_inum = smb2_get_srv_inum,
5714 .query_file_info = smb2_query_file_info,
5715 .set_path_size = smb2_set_path_size,
5716 .set_file_size = smb2_set_file_size,
5717 .set_file_info = smb2_set_file_info,
5718 .set_compression = smb2_set_compression,
5719 .mkdir = smb2_mkdir,
5720 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05005721 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05005722 .rmdir = smb2_rmdir,
5723 .unlink = smb2_unlink,
5724 .rename = smb2_rename_path,
5725 .create_hardlink = smb2_create_hardlink,
5726 .query_symlink = smb2_query_symlink,
5727 .query_mf_symlink = smb3_query_mf_symlink,
5728 .create_mf_symlink = smb3_create_mf_symlink,
5729 .open = smb2_open_file,
5730 .set_fid = smb2_set_fid,
5731 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06005732 .close_getattr = smb2_close_getattr,
Steve Frenchaab18932015-06-23 23:37:11 -05005733 .flush = smb2_flush_file,
5734 .async_readv = smb2_async_readv,
5735 .async_writev = smb2_async_writev,
5736 .sync_read = smb2_sync_read,
5737 .sync_write = smb2_sync_write,
5738 .query_dir_first = smb2_query_dir_first,
5739 .query_dir_next = smb2_query_dir_next,
5740 .close_dir = smb2_close_dir,
5741 .calc_smb_size = smb2_calc_size,
5742 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005743 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05005744 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05005745 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05005746 .mand_lock = smb2_mand_lock,
5747 .mand_unlock_range = smb2_unlock_range,
5748 .push_mand_locks = smb2_push_mandatory_locks,
5749 .get_lease_key = smb2_get_lease_key,
5750 .set_lease_key = smb2_set_lease_key,
5751 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06005752 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05005753 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05005754 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05005755 .is_read_op = smb21_is_read_op,
5756 .set_oplock_level = smb3_set_oplock_level,
5757 .create_lease_buf = smb3_create_lease_buf,
5758 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005759 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07005760 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05005761/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
5762 .wp_retry_size = smb2_wp_retry_size,
5763 .dir_needs_close = smb2_dir_needs_close,
5764 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05005765 .enum_snapshots = smb3_enum_snapshots,
Steve Frenchd26c2dd2020-02-06 06:00:14 -06005766 .notify = smb3_notify,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07005767 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005768 .is_transform_hdr = smb3_is_transform_hdr,
5769 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005770 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305771 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005772#ifdef CONFIG_CIFS_XATTR
5773 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005774 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005775#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10005776 .get_acl = get_smb2_acl,
5777 .get_acl_by_fid = get_smb2_acl_by_fid,
5778 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005779 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005780 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005781 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005782 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005783 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005784 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005785 .is_network_name_deleted = smb2_is_network_name_deleted,
Steve Frenchaab18932015-06-23 23:37:11 -05005786};
Steve Frenchaab18932015-06-23 23:37:11 -05005787
Steve Frenchdd446b12012-11-28 23:21:06 -06005788struct smb_version_values smb20_values = {
5789 .version_string = SMB20_VERSION_STRING,
5790 .protocol_id = SMB20_PROT_ID,
5791 .req_capabilities = 0, /* MBZ */
5792 .large_lock_type = 0,
5793 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5794 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5795 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005796 .header_size = sizeof(struct smb2_hdr),
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005797 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06005798 .max_header_size = MAX_SMB2_HDR_SIZE,
5799 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5800 .lock_cmd = SMB2_LOCK,
5801 .cap_unix = 0,
5802 .cap_nt_find = SMB2_NT_FIND,
5803 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005804 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5805 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005806 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06005807};
5808
Steve French1080ef72011-02-24 18:07:19 +00005809struct smb_version_values smb21_values = {
5810 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05005811 .protocol_id = SMB21_PROT_ID,
5812 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
5813 .large_lock_type = 0,
5814 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5815 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5816 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005817 .header_size = sizeof(struct smb2_hdr),
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005818 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05005819 .max_header_size = MAX_SMB2_HDR_SIZE,
5820 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5821 .lock_cmd = SMB2_LOCK,
5822 .cap_unix = 0,
5823 .cap_nt_find = SMB2_NT_FIND,
5824 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005825 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5826 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005827 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05005828};
5829
Steve French9764c022017-09-17 10:41:35 -05005830struct smb_version_values smb3any_values = {
5831 .version_string = SMB3ANY_VERSION_STRING,
5832 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05005833 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05005834 .large_lock_type = 0,
5835 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5836 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5837 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005838 .header_size = sizeof(struct smb2_hdr),
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005839 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05005840 .max_header_size = MAX_SMB2_HDR_SIZE,
5841 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5842 .lock_cmd = SMB2_LOCK,
5843 .cap_unix = 0,
5844 .cap_nt_find = SMB2_NT_FIND,
5845 .cap_large_files = SMB2_LARGE_FILES,
5846 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5847 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5848 .create_lease_size = sizeof(struct create_lease_v2),
5849};
5850
/*
 * Protocol constants used when no vers= option was given. As with
 * smb3any_values, a list of dialects is sent in the negotiate request,
 * so .protocol_id here is only a placeholder.
 */
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1 presumably excludes a 1-byte data placeholder at the end of
	 * smb2_read_rsp — confirm against the struct definition */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context for SMB3 */
};
5871
Steve Frenche4aa25e2012-10-01 12:26:22 -05005872struct smb_version_values smb30_values = {
5873 .version_string = SMB30_VERSION_STRING,
5874 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05005875 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07005876 .large_lock_type = 0,
5877 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5878 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5879 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005880 .header_size = sizeof(struct smb2_hdr),
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005881 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04005882 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07005883 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005884 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04005885 .cap_unix = 0,
5886 .cap_nt_find = SMB2_NT_FIND,
5887 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005888 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5889 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04005890 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00005891};
Steve French20b6d8b2013-06-12 22:48:41 -05005892
/*
 * Protocol constants for the SMB 3.0.2 dialect. Identical to smb30_values
 * except for the version string and protocol id.
 */
struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1 presumably excludes a 1-byte data placeholder at the end of
	 * smb2_read_rsp — confirm against the struct definition */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French5f7fbf72014-12-17 22:52:58 -06005913
Steve French5f7fbf72014-12-17 22:52:58 -06005914struct smb_version_values smb311_values = {
5915 .version_string = SMB311_VERSION_STRING,
5916 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05005917 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06005918 .large_lock_type = 0,
5919 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5920 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5921 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg0d35e382021-11-05 08:39:01 +09005922 .header_size = sizeof(struct smb2_hdr),
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005923 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06005924 .max_header_size = MAX_SMB2_HDR_SIZE,
5925 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5926 .lock_cmd = SMB2_LOCK,
5927 .cap_unix = 0,
5928 .cap_nt_find = SMB2_NT_FIND,
5929 .cap_large_files = SMB2_LARGE_FILES,
5930 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5931 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5932 .create_lease_size = sizeof(struct create_lease_v2),
5933};