blob: 3ff4600357059abaa1828b79f8e8e96f03a82c32 [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Aurelien Aptel35adffe2019-09-20 06:29:39 +020013#include <linux/sort.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070014#include <crypto/aead.h>
Christoph Hellwig10c5db22020-05-23 09:30:11 +020015#include <linux/fiemap.h>
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +100016#include "cifsfs.h"
Steve French1080ef72011-02-24 18:07:19 +000017#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040018#include "smb2pdu.h"
19#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040020#include "cifsproto.h"
21#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040022#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070023#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070024#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050025#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070026#include "smbdirect.h"
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -060027#include "fs_context.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040028
/*
 * Change credits for different ops and return the total number of credits.
 * Rebalances the server's credit pool between regular, echo and oplock
 * credits based on how many total credits are available.  Returns 0 when
 * the pool is empty (a protocol violation the caller reports), otherwise
 * the total credit count.  Called with server->req_lock held (see
 * smb2_add_credits).
 */
static int
change_conf(struct TCP_Server_Info *server)
{
	/* pull all reserved credits back into the common pool first */
	server->credits += server->echo_credits + server->oplock_credits;
	server->oplock_credits = server->echo_credits = 0;
	switch (server->credits) {
	case 0:
		return 0;
	case 1:
		/* one credit left: reserve it for regular ops only */
		server->echoes = false;
		server->oplocks = false;
		break;
	case 2:
		/* enough for echoes but not for oplock breaks */
		server->echoes = true;
		server->oplocks = false;
		server->echo_credits = 1;
		break;
	default:
		server->echoes = true;
		if (enable_oplocks) {
			server->oplocks = true;
			server->oplock_credits = 1;
		} else
			server->oplocks = false;

		server->echo_credits = 1;
	}
	/* what remains in ->credits is available for regular requests */
	server->credits -= server->echo_credits + server->oplock_credits;
	return server->credits + server->echo_credits + server->oplock_credits;
}
60
/*
 * Return credits granted by a server response back to the pool selected by
 * @optype.  Detects credits returned against a stale reconnect instance and
 * negotiate requests racing with in-flight ops, emits tracepoints for both,
 * and rebalances the pool via change_conf() once no requests are in flight.
 */
static void
smb2_add_credits(struct TCP_Server_Info *server,
		 const struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;	/* -1 means change_conf() was not run */
	int scredits, in_flight;
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;
	bool reconnect_with_invalid_credits = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* eg found case where write overlapping reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		reconnect_with_invalid_credits = true;

	/* only bank the credits if they belong to the current connection */
	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		pr_warn_once("server overflowed SMB3 credits\n");
	}
	server->in_flight--;
	if (server->in_flight == 0 &&
	    ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
	    ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	}
	/* snapshot for logging/tracing after the lock is dropped */
	scredits = *val;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	if (reconnect_detected) {
		trace_smb3_reconnect_detected(server->CurrentMid,
			server->conn_id, server->hostname, scredits, add, in_flight);

		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			 add, instance);
	}

	if (reconnect_with_invalid_credits) {
		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
			server->conn_id, server->hostname, scredits, add, in_flight);
		cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
			optype, scredits, add);
	}

	/* no point reporting rebalance results on a dying connection */
	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting)
		return;

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		/* change_conf rebalanced credits for different types */
		break;
	}

	trace_smb3_add_credits(server->CurrentMid,
			server->conn_id, server->hostname, scredits, add, in_flight);
	cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
}
150
/*
 * Force the server credit count to @val.  Setting it to 1 is the reconnect
 * path, so the reconnect instance is bumped to invalidate credits still
 * attributed to the previous connection.
 */
static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
	int scredits, in_flight;

	spin_lock(&server->req_lock);
	server->credits = val;
	if (val == 1)
		server->reconnect_instance++;
	/* snapshot for logging/tracing outside the lock */
	scredits = server->credits;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);

	trace_smb3_set_credits(server->CurrentMid,
			server->conn_id, server->hostname, scredits, val, in_flight);
	cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);

	/* don't log while holding the lock */
	if (val == 1)
		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}
172
173static int *
174smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
175{
176 switch (optype) {
177 case CIFS_ECHO_OP:
178 return &server->echo_credits;
179 case CIFS_OBREAK_OP:
180 return &server->oplock_credits;
181 default:
182 return &server->credits;
183 }
184}
185
186static unsigned int
187smb2_get_credits(struct mid_q_entry *mid)
188{
Pavel Shilovsky86a79642019-11-21 11:35:13 -0800189 return mid->credits_received;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400190}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400191
/*
 * Reserve credits for a multi-credit (large MTU) request of up to @size
 * bytes.  Sleeps (killably) until at least one credit is available, keeps a
 * reserve of 8 credits for reopen and other small ops, and returns the
 * granted byte count in *@num and the consumed credits in *@credits.
 * Returns 0 on success, -ENOENT if the connection is being torn down, or
 * the error from the killable wait.
 */
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits, in_flight;

	spin_lock(&server->req_lock);
	while (1) {
		if (server->credits <= 0) {
			/* drop the lock before sleeping for credits */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				/* too few credits: grant one buffer, bill nothing */
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			if (server->in_flight > server->max_in_flight)
				server->max_in_flight = server->in_flight;
			break;
		}
	}
	/* snapshot for logging/tracing outside the lock */
	scredits = server->credits;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);

	trace_smb3_add_credits(server->CurrentMid,
			server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
	cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
			__func__, credits->value, scredits);

	return rc;
}
251
/*
 * Shrink a previously reserved credit allocation to match the actual
 * @payload_size and return the surplus to the server's pool.  Fails with
 * -ENOTSUPP if the reservation is too small for the payload, and -EAGAIN
 * if the connection was reset since the credits were taken (they belong to
 * a stale reconnect instance and must not be returned).
 */
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_credits *credits,
		    const unsigned int payload_size)
{
	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
	int scredits, in_flight;

	/* nothing reserved, or reservation already exact: no adjustment */
	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		trace_smb3_too_many_credits(server->CurrentMid,
				server->conn_id, server->hostname, 0, credits->value - new_val, 0);
		cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
				credits->value, new_val);

		return -ENOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		scredits = server->credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_reconnect_detected(server->CurrentMid,
			server->conn_id, server->hostname, scredits,
			credits->value - new_val, in_flight);
		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
			credits->value - new_val);
		return -EAGAIN;
	}

	/* give the surplus back and wake anyone waiting on credits */
	server->credits += credits->value - new_val;
	scredits = server->credits;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	trace_smb3_add_credits(server->CurrentMid,
			server->conn_id, server->hostname, scredits,
			credits->value - new_val, in_flight);
	cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
			__func__, credits->value - new_val, scredits);

	credits->value = new_val;

	return 0;
}
303
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400304static __u64
305smb2_get_next_mid(struct TCP_Server_Info *server)
306{
307 __u64 mid;
308 /* for SMB2 we need the current value */
309 spin_lock(&GlobalMid_Lock);
310 mid = server->CurrentMid++;
311 spin_unlock(&GlobalMid_Lock);
312 return mid;
313}
Steve French1080ef72011-02-24 18:07:19 +0000314
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800315static void
316smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
317{
318 spin_lock(&GlobalMid_Lock);
319 if (server->CurrentMid >= val)
320 server->CurrentMid -= val;
321 spin_unlock(&GlobalMid_Lock);
322}
323
/*
 * Find the pending mid matching the response in @buf (by message id, state
 * and command), taking a reference on it.  If @dequeue, also remove it from
 * the pending queue and mark it deleted while still under GlobalMid_Lock.
 * Returns NULL for encrypted frames (not handled here) or when no match
 * exists.
 */
static struct mid_q_entry *
__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
{
	struct mid_q_entry *mid;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			/* caller owns this reference */
			kref_get(&mid->refcount);
			if (dequeue) {
				list_del_init(&mid->qhead);
				mid->mid_flags |= MID_DELETED;
			}
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}
353
/* Look up the mid for @buf without removing it from the pending queue. */
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	return __smb2_find_mid(server, buf, false);
}
359
/* Look up the mid for @buf and atomically remove it from the pending queue. */
static struct mid_q_entry *
smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
{
	return __smb2_find_mid(server, buf, true);
}
365
/*
 * Dump the key SMB2 header fields and buffer length of @buf for debugging.
 * Compiled out unless CONFIG_CIFS_DEBUG2 is enabled.
 */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
		 shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
		 server->ops->calc_smb_size(buf, server));
#endif
}
379
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400380static bool
381smb2_need_neg(struct TCP_Server_Info *server)
382{
383 return server->max_read == 0;
384}
385
/*
 * Run the SMB2 protocol negotiation for @ses, resetting the mid counter
 * first.  -EAGAIN from SMB2_negotiate is mapped to -EHOSTDOWN so callers
 * do not retry.
 */
static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;

	cifs_ses_server(ses)->CurrentMid = 0;
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}
398
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700399static unsigned int
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600400smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700401{
402 struct TCP_Server_Info *server = tcon->ses->server;
403 unsigned int wsize;
404
405 /* start with specified wsize, or default */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600406 wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700407 wsize = min_t(unsigned int, wsize, server->max_write);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400408 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
409 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700410
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700411 return wsize;
412}
413
/*
 * Pick the write size for an SMB3 mount.  Like the SMB2 variant but with a
 * larger default and, when SMB Direct (RDMA) is in use, clamped to the
 * smbdirect connection limits (minus packet and transform header overhead
 * when signing forces non-RDMA data transfer).
 */
static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			wsize = min_t(unsigned int,
				wsize,
				server->smbd_conn->max_fragmented_send_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	/* without large MTU support, cap at a single credit's buffer */
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
445
446static unsigned int
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600447smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700448{
449 struct TCP_Server_Info *server = tcon->ses->server;
450 unsigned int rsize;
451
452 /* start with specified rsize, or default */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600453 rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700454 rsize = min_t(unsigned int, rsize, server->max_read);
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400455
456 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
457 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700458
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700459 return rsize;
460}
461
/*
 * Pick the read size for an SMB3 mount.  Like the SMB2 variant but with a
 * larger default and, when SMB Direct (RDMA) is in use, clamped to the
 * smbdirect connection limits (minus packet and transform header overhead
 * when signing forces non-RDMA data transfer).
 */
static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			rsize = min_t(unsigned int,
				rsize,
				server->smbd_conn->max_fragmented_recv_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			rsize = min_t(unsigned int,
				rsize, server->smbd_conn->max_readwrite_size);
	}
#endif

	/* without large MTU support, cap at a single credit's buffer */
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200494
/*
 * Parse an FSCTL_QUERY_NETWORK_INTERFACE_INFO response of @buf_len bytes
 * into a newly allocated array of struct cifs_server_iface.
 *
 * Two passes over the Next-linked wire records: the first counts entries
 * and sanity-checks the chain, the second copies speed/capabilities and the
 * IPv4/IPv6 address of each supported entry.  Unsupported address families
 * are skipped, so *@iface_count may be smaller than the record count.
 *
 * On success returns 0 with ownership of *@iface_list transferred to the
 * caller (who must kfree it); on error returns -EINVAL/-ENOMEM with
 * *@iface_list NULL and *@iface_count 0.
 */
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
			size_t buf_len,
			struct cifs_server_iface **iface_list,
			size_t *iface_count)
{
	struct network_interface_info_ioctl_rsp *p;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	struct iface_info_ipv4 *p4;
	struct iface_info_ipv6 *p6;
	struct cifs_server_iface *info;
	ssize_t bytes_left;
	size_t next = 0;
	int nb_iface = 0;
	int rc = 0;

	*iface_list = NULL;
	*iface_count = 0;

	/*
	 * First pass: count and sanity check
	 */

	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		nb_iface++;
		next = le32_to_cpu(p->Next);
		if (!next) {
			/* Next == 0 marks the last record in the chain */
			bytes_left -= sizeof(*p);
			break;
		}
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!nb_iface) {
		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	/* Azure rounds the buffer size up 8, to a 16 byte boundary */
	if ((bytes_left > 8) || p->Next)
		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);


	/*
	 * Second pass: extract info to internal structure
	 */

	*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
	if (!*iface_list) {
		rc = -ENOMEM;
		goto out;
	}

	info = *iface_list;
	bytes_left = buf_len;
	p = buf;
	while (bytes_left >= sizeof(*p)) {
		info->speed = le64_to_cpu(p->LinkSpeed);
		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);

		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
			 le32_to_cpu(p->Capability));

		switch (p->Family) {
		/*
		 * The kernel and wire socket structures have the same
		 * layout and use network byte order but make the
		 * conversion explicit in case either one changes.
		 */
		case INTERNETWORK:
			addr4 = (struct sockaddr_in *)&info->sockaddr;
			p4 = (struct iface_info_ipv4 *)p->Buffer;
			addr4->sin_family = AF_INET;
			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
			addr4->sin_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
				 &addr4->sin_addr);
			break;
		case INTERNETWORKV6:
			addr6 = (struct sockaddr_in6 *)&info->sockaddr;
			p6 = (struct iface_info_ipv6 *)p->Buffer;
			addr6->sin6_family = AF_INET6;
			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
			addr6->sin6_flowinfo = 0;
			addr6->sin6_scope_id = 0;
			addr6->sin6_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
				 &addr6->sin6_addr);
			break;
		default:
			cifs_dbg(VFS,
				 "%s: skipping unsupported socket family\n",
				 __func__);
			goto next_iface;
		}

		(*iface_count)++;
		info++;
next_iface:
		next = le32_to_cpu(p->Next);
		if (!next)
			break;
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!*iface_count) {
		/* every record was of an unsupported family */
		rc = -EINVAL;
		goto out;
	}

out:
	if (rc) {
		kfree(*iface_list);
		*iface_count = 0;
		*iface_list = NULL;
	}
	return rc;
}
628
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200629static int compare_iface(const void *ia, const void *ib)
630{
631 const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
632 const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
633
634 return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
635}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200636
/*
 * Query the server's network interfaces via
 * FSCTL_QUERY_NETWORK_INTERFACE_INFO, parse and sort them by descending
 * link speed, and install the new list on the session (freeing any
 * previous one) under ses->iface_lock.  -EOPNOTSUPP from the server is
 * logged at FYI level only since not all servers implement the ioctl.
 */
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_server_iface *iface_list;
	size_t iface_count;
	struct cifs_ses *ses = tcon->ses;

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		goto out;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len,
				     &iface_list, &iface_count);
	if (rc)
		goto out;

	/* sort interfaces from fastest to slowest */
	sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);

	spin_lock(&ses->iface_lock);
	kfree(ses->iface_list);
	ses->iface_list = iface_list;
	ses->iface_count = iface_count;
	ses->iface_last_update = jiffies;
	spin_unlock(&ses->iface_lock);

out:
	kfree(out_buf);
	return rc;
}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500679
/*
 * kref release callback for a cached directory fid: closes the handle on
 * the server and invalidates the cached state.  Invoked when the last
 * reference is dropped via kref_put().
 */
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	if (cfid->is_valid) {
		cifs_dbg(FYI, "clear cached root file handle\n");
		SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
			   cfid->fid->volatile_fid);
		cfid->is_valid = false;
		cfid->file_all_info_is_valid = false;
		cfid->has_lease = false;
	}
}
695
/*
 * Drop one reference on a cached directory handle, closing it on the
 * server (via smb2_close_cached_fid) if this was the last reference.
 */
void close_cached_dir(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
	mutex_unlock(&cfid->fid_mutex);
}
702
/*
 * Drop the lease's reference on a cached directory handle.  Caller must
 * hold cfid->fid_mutex (see close_cached_dir_lease for the locking
 * wrapper).
 */
void close_cached_dir_lease_locked(struct cached_fid *cfid)
{
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}
710
/*
 * Drop the lease's reference on a cached directory handle, taking
 * cfid->fid_mutex around the operation.
 */
void close_cached_dir_lease(struct cached_fid *cfid)
{
	mutex_lock(&cfid->fid_mutex);
	close_cached_dir_lease_locked(cfid);
	mutex_unlock(&cfid->fid_mutex);
}
717
/*
 * Deferred-work handler for a lease break on a cached directory handle:
 * releases the lease's reference so the handle can be closed.
 */
void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, lease_break);

	close_cached_dir_lease(cfid);
}
726
/*
 * Open and cache a directory handle.
 * Only supported for the root handle: any non-empty @path returns -ENOENT
 * so the caller falls back to a regular (uncached) open.
 *
 * On success *@cfid points at tcon->crfid with a reference taken; the
 * caller releases it with close_cached_dir().
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		const char *path,
		struct cifs_sb_info *cifs_sb,
		struct cached_fid **cfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;

	/* handle caching disabled for this mount */
	if (tcon->nohandlecache)
		return -ENOTSUPP;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	/* only the share root ("" path) is cached */
	if (strlen(path))
		return -ENOENT;

	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		/* fast path: hand out the existing cached handle */
		cifs_dbg(FYI, "found a cached root file handle\n");
		*cfid = &tcon->crfid;
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (!server->ops->new_lease_key)
		return -EIO;

	pfid = tcon->crfid.fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open (first leg of the open + query-info compound) */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query info (second leg, reuses the handle via COMPOUND_FID) */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set pfid to a valid
		 * cached root, so we copy the existing one and get a
		 * reference.
		 */
		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		rc = 0;
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	/* publish the new handle as the cached root */
	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/* extra ref for the lease, dropped on lease break/close */
		kref_get(&tcon->crfid.refcount);
		tcon->crfid.has_lease = true;
		smb2_parse_contexts(server, o_rsp,
				    &oparms.fid->epoch,
				    oparms.fid->lease_key, &oplock,
				    NULL, NULL);
	} else
		goto oshr_exit;

	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = true;

oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	if (rc == 0)
		*cfid = &tcon->crfid;
	return rc;
}
918
Steve French34f62642013-10-09 02:07:00 -0500919static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200920smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
921 struct cifs_sb_info *cifs_sb)
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500922{
923 int rc;
924 __le16 srch_path = 0; /* Null - open root of share */
925 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
926 struct cifs_open_parms oparms;
927 struct cifs_fid fid;
Ronnie Sahlberg9e81e8f2020-10-05 12:37:52 +1000928 struct cached_fid *cfid = NULL;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500929
930 oparms.tcon = tcon;
931 oparms.desired_access = FILE_READ_ATTRIBUTES;
932 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200933 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500934 oparms.fid = &fid;
935 oparms.reconnect = false;
936
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000937 rc = open_cached_dir(xid, tcon, "", cifs_sb, &cfid);
Ronnie Sahlberg4df3d972021-03-09 09:07:27 +1000938 if (rc == 0)
939 memcpy(&fid, cfid->fid, sizeof(struct cifs_fid));
940 else
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000941 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
Aurelien Aptel69dda302020-03-02 17:53:22 +0100942 NULL, NULL);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500943 if (rc)
944 return;
945
Steve Frenchc481e9f2013-10-14 01:21:53 -0500946 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500947
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500948 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
949 FS_ATTRIBUTE_INFORMATION);
950 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
951 FS_DEVICE_INFORMATION);
952 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500953 FS_VOLUME_INFORMATION);
954 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500955 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Ronnie Sahlberg4df3d972021-03-09 09:07:27 +1000956 if (cfid == NULL)
Steve French3d4ef9a2018-04-25 22:19:09 -0500957 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000958 else
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000959 close_cached_dir(cfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500960}
961
962static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200963smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
964 struct cifs_sb_info *cifs_sb)
Steve French34f62642013-10-09 02:07:00 -0500965{
966 int rc;
967 __le16 srch_path = 0; /* Null - open root of share */
968 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
969 struct cifs_open_parms oparms;
970 struct cifs_fid fid;
971
972 oparms.tcon = tcon;
973 oparms.desired_access = FILE_READ_ATTRIBUTES;
974 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200975 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French34f62642013-10-09 02:07:00 -0500976 oparms.fid = &fid;
977 oparms.reconnect = false;
978
Aurelien Aptel69dda302020-03-02 17:53:22 +0100979 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
980 NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -0500981 if (rc)
982 return;
983
Steven French21671142013-10-09 13:36:35 -0500984 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
985 FS_ATTRIBUTE_INFORMATION);
986 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
987 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -0500988 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -0500989}
990
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400991static int
992smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
993 struct cifs_sb_info *cifs_sb, const char *full_path)
994{
995 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400996 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -0700997 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +0400998 struct cifs_open_parms oparms;
999 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001000
Ronnie Sahlberga93864d2018-06-14 06:48:35 +10001001 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -05001002 return 0;
1003
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001004 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
1005 if (!utf16_path)
1006 return -ENOMEM;
1007
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001008 oparms.tcon = tcon;
1009 oparms.desired_access = FILE_READ_ATTRIBUTES;
1010 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001011 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001012 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001013 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001014
Aurelien Aptel69dda302020-03-02 17:53:22 +01001015 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
1016 NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001017 if (rc) {
1018 kfree(utf16_path);
1019 return rc;
1020 }
1021
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001022 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001023 kfree(utf16_path);
1024 return rc;
1025}
1026
/*
 * Return the server-assigned unique id for a file.  The FILE_ALL_INFO in
 * @data was already fetched by the caller, so no network round trip is
 * needed here; we just extract the (little-endian) IndexNumber.
 * xid/tcon/cifs_sb/full_path are unused but required by the ops signature.
 */
static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}
1035
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07001036static int
1037smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
1038 struct cifs_fid *fid, FILE_ALL_INFO *data)
1039{
1040 int rc;
1041 struct smb2_file_all_info *smb2_data;
1042
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04001043 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07001044 GFP_KERNEL);
1045 if (smb2_data == NULL)
1046 return -ENOMEM;
1047
1048 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
1049 smb2_data);
1050 if (!rc)
1051 move_smb2_info_to_cifs(data, smb2_data);
1052 kfree(smb2_data);
1053 return rc;
1054}
1055
Arnd Bergmann1368f152017-09-05 11:24:15 +02001056#ifdef CONFIG_CIFS_XATTR
/*
 * Convert a wire-format SMB2 FILE_FULL_EA_INFORMATION list (@src, spanning
 * @src_size bytes) into the form the VFS xattr layer expects.
 *
 * Two modes:
 *  - @ea_name != NULL (getxattr): search for that name; on match copy its
 *    value into @dst and return the value length (just the length when
 *    @dst_size == 0, -ERANGE when @dst is too small), or -ENODATA when the
 *    name is not present.
 *  - @ea_name == NULL (listxattr): emit each name as "user.<name>\0" into
 *    @dst and return the total byte count; @dst_size == 0 only computes
 *    the required size.
 *
 * Returns -EIO when an entry claims to extend past @src_size.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;	/* original size; dst_size shrinks as we copy */
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		/* value follows the name and its NUL terminator */
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		/* an empty name marks the end of the list */
		if (name_len == 0)
			break;

		/*
		 * 8 = fixed header bytes of the EA entry preceding ea_data
		 * (presumably next_entry_offset + flags + the two length
		 * fields — TODO confirm against smb2_file_full_ea_info)
		 */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			/* getxattr: looking for one specific attribute */
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;	/* size query only */
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* a zero next_entry_offset marks the last entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1139
/*
 * getxattr/listxattr entry point for SMB2: fetch the full EA list for
 * @path with a compound open/query-info/close, validate the response, and
 * let move_smb2_ea_to_cifs() filter/convert it into @ea_data.
 *
 * Returns the byte count from move_smb2_ea_to_cifs(), 0 for a listxattr
 * (@ea_name == NULL) on a file with no EAs, or a negative errno.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* response size is capped so create + close replies still fit */
	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	/* make sure the returned buffer region lies within the response */
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
				  le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1194
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001195
1196static int
1197smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1198 const char *path, const char *ea_name, const void *ea_value,
1199 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1200 struct cifs_sb_info *cifs_sb)
1201{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001202 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001203 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001204 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001205 int ea_name_len = strlen(ea_name);
Paulo Alcantara04ad69c2021-03-08 12:00:50 -03001206 int flags = CIFS_CP_CREATE_CLOSE_OP;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001207 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001208 struct smb_rqst rqst[3];
1209 int resp_buftype[3];
1210 struct kvec rsp_iov[3];
1211 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1212 struct cifs_open_parms oparms;
1213 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1214 struct cifs_fid fid;
1215 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1216 unsigned int size[1];
1217 void *data[1];
1218 struct smb2_file_full_ea_info *ea = NULL;
1219 struct kvec close_iov[1];
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001220 struct smb2_query_info_rsp *rsp;
1221 int rc, used_len = 0;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001222
1223 if (smb3_encryption_required(tcon))
1224 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001225
1226 if (ea_name_len > 255)
1227 return -EINVAL;
1228
1229 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1230 if (!utf16_path)
1231 return -ENOMEM;
1232
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001233 memset(rqst, 0, sizeof(rqst));
1234 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1235 memset(rsp_iov, 0, sizeof(rsp_iov));
1236
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001237 if (ses->server->ops->query_all_EAs) {
1238 if (!ea_value) {
1239 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1240 ea_name, NULL, 0,
1241 cifs_sb);
1242 if (rc == -ENODATA)
1243 goto sea_exit;
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001244 } else {
1245 /* If we are adding a attribute we should first check
1246 * if there will be enough space available to store
1247 * the new EA. If not we should not add it since we
1248 * would not be able to even read the EAs back.
1249 */
1250 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1251 FILE_READ_EA,
1252 FILE_FULL_EA_INFORMATION,
1253 SMB2_O_INFO_FILE,
1254 CIFSMaxBufSize -
1255 MAX_SMB2_CREATE_RESPONSE_SIZE -
1256 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1257 &rsp_iov[1], &resp_buftype[1], cifs_sb);
1258 if (rc == 0) {
1259 rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1260 used_len = le32_to_cpu(rsp->OutputBufferLength);
1261 }
1262 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1263 resp_buftype[1] = CIFS_NO_BUFFER;
1264 memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1265 rc = 0;
1266
1267 /* Use a fudge factor of 256 bytes in case we collide
1268 * with a different set_EAs command.
1269 */
1270 if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1271 MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1272 used_len + ea_name_len + ea_value_len + 1) {
1273 rc = -ENOSPC;
1274 goto sea_exit;
1275 }
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001276 }
1277 }
1278
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001279 /* Open */
1280 memset(&open_iov, 0, sizeof(open_iov));
1281 rqst[0].rq_iov = open_iov;
1282 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1283
1284 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001285 oparms.tcon = tcon;
1286 oparms.desired_access = FILE_WRITE_EA;
1287 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001288 oparms.create_options = cifs_create_options(cifs_sb, 0);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001289 oparms.fid = &fid;
1290 oparms.reconnect = false;
1291
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001292 rc = SMB2_open_init(tcon, server,
1293 &rqst[0], &oplock, &oparms, utf16_path);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001294 if (rc)
1295 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001296 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001297
1298
1299 /* Set Info */
1300 memset(&si_iov, 0, sizeof(si_iov));
1301 rqst[1].rq_iov = si_iov;
1302 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001303
Vladimir Zapolskiy64b7f672020-10-10 21:25:54 +03001304 len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001305 ea = kzalloc(len, GFP_KERNEL);
1306 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001307 rc = -ENOMEM;
1308 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001309 }
1310
1311 ea->ea_name_length = ea_name_len;
1312 ea->ea_value_length = cpu_to_le16(ea_value_len);
1313 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1314 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1315
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001316 size[0] = len;
1317 data[0] = ea;
1318
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001319 rc = SMB2_set_info_init(tcon, server,
1320 &rqst[1], COMPOUND_FID,
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001321 COMPOUND_FID, current->tgid,
1322 FILE_FULL_EA_INFORMATION,
1323 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001324 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001325 smb2_set_related(&rqst[1]);
1326
1327
1328 /* Close */
1329 memset(&close_iov, 0, sizeof(close_iov));
1330 rqst[2].rq_iov = close_iov;
1331 rqst[2].rq_nvec = 1;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001332 rc = SMB2_close_init(tcon, server,
1333 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001334 smb2_set_related(&rqst[2]);
1335
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001336 rc = compound_send_recv(xid, ses, server,
1337 flags, 3, rqst,
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001338 resp_buftype, rsp_iov);
Steve Frenchd2f15422019-09-22 00:55:46 -05001339 /* no need to bump num_remote_opens because handle immediately closed */
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001340
1341 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001342 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001343 kfree(utf16_path);
1344 SMB2_open_free(&rqst[0]);
1345 SMB2_set_info_free(&rqst[1]);
1346 SMB2_close_free(&rqst[2]);
1347 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1348 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1349 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001350 return rc;
1351}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001352#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001353
/*
 * Report whether echo requests may be sent on this connection,
 * per the server->echoes flag.
 */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1359
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001360static void
1361smb2_clear_stats(struct cifs_tcon *tcon)
1362{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001363 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001364
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001365 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1366 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1367 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1368 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001369}
1370
1371static void
Steve French769ee6a2013-06-19 14:15:30 -05001372smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1373{
1374 seq_puts(m, "\n\tShare Capabilities:");
1375 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1376 seq_puts(m, " DFS,");
1377 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1378 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1379 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1380 seq_puts(m, " SCALEOUT,");
1381 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1382 seq_puts(m, " CLUSTER,");
1383 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1384 seq_puts(m, " ASYMMETRIC,");
1385 if (tcon->capabilities == 0)
1386 seq_puts(m, " None");
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001387 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1388 seq_puts(m, " Aligned,");
1389 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1390 seq_puts(m, " Partition Aligned,");
1391 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1392 seq_puts(m, " SSD,");
1393 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1394 seq_puts(m, " TRIM-support,");
1395
Steve French769ee6a2013-06-19 14:15:30 -05001396 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
Steve Frenche0386e42018-05-20 01:27:03 -05001397 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -05001398 if (tcon->perf_sector_size)
1399 seq_printf(m, "\tOptimal sector size: 0x%x",
1400 tcon->perf_sector_size);
Steve Frenche0386e42018-05-20 01:27:03 -05001401 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
Steve French769ee6a2013-06-19 14:15:30 -05001402}
1403
1404static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001405smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1406{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001407 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1408 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001409
1410 /*
1411 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1412 * totals (requests sent) since those SMBs are per-session not per tcon
1413 */
Steve French52ce1ac2018-07-31 01:46:47 -05001414 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1415 (long long)(tcon->bytes_read),
1416 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001417 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1418 atomic_read(&tcon->num_local_opens),
1419 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001420 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001421 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1422 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001423 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001424 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1425 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001426 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001427 atomic_read(&sent[SMB2_CREATE_HE]),
1428 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001429 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001430 atomic_read(&sent[SMB2_CLOSE_HE]),
1431 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001432 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001433 atomic_read(&sent[SMB2_FLUSH_HE]),
1434 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001435 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001436 atomic_read(&sent[SMB2_READ_HE]),
1437 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001438 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001439 atomic_read(&sent[SMB2_WRITE_HE]),
1440 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001441 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001442 atomic_read(&sent[SMB2_LOCK_HE]),
1443 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001444 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001445 atomic_read(&sent[SMB2_IOCTL_HE]),
1446 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001447 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001448 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1449 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001450 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001451 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1452 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001453 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001454 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1455 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001456 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001457 atomic_read(&sent[SMB2_SET_INFO_HE]),
1458 atomic_read(&failed[SMB2_SET_INFO_HE]));
1459 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1460 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1461 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001462}
1463
/*
 * Record the server-returned open handle (persistent/volatile fid pair)
 * in the cifsFileInfo and apply the oplock/lease state granted with it.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
	cfile->fid.access = fid->access;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	/* dialect-specific translation of the oplock/lease level */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	/* byte-range locks may only be cached while we hold write caching */
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1481
/* Close the SMB2 handle identified by the persistent/volatile fid pair. */
static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1488
/*
 * Close the handle and, when the close response carries the file's final
 * attributes, refresh the cached inode timestamps and block count so a
 * subsequent stat() can be answered without another round trip.
 */
static void
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
		   cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return;	/* close failed: leave cached attributes untouched */

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	/* mark the inode metadata as freshly revalidated */
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);

	/*
	 * i_blocks is not related to (i_size / i_blksize),
	 * but instead 512 byte (2**9) size is required for
	 * calculating num blocks.
	 */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
}
1527
/*
 * Ask the server for a resume key (FSCTL_SRV_REQUEST_RESUME_KEY) that
 * identifies the source file of a server-side copy, and stash it in
 * pcchunk->SourceKey for the subsequent FSCTL_SRV_COPYCHUNK_WRITE calls.
 * Returns 0 on success or a negative error code.
 */
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		   u64 persistent_fid, u64 volatile_fid,
		   struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	/* reject short responses before copying the fixed-size key out */
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}
1557
/*
 * Scratch space for smb2_ioctl_query_info(): the three-request compound
 * (open + query/ioctl/setinfo + close) with all of its iovec arrays,
 * heap-allocated in one chunk to keep that function's stack frame small.
 */
struct iqi_vars {
	struct smb_rqst rqst[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	struct kvec close_iov[1];
};
1567
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001568static int
1569smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001570 struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02001571 struct cifs_sb_info *cifs_sb,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001572 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001573 unsigned long p)
1574{
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001575 struct iqi_vars *vars;
1576 struct smb_rqst *rqst;
1577 struct kvec *rsp_iov;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001578 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001579 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001580 char __user *arg = (char __user *)p;
1581 struct smb_query_info qi;
1582 struct smb_query_info __user *pqi;
1583 int rc = 0;
Paulo Alcantara04ad69c2021-03-08 12:00:50 -03001584 int flags = CIFS_CP_CREATE_CLOSE_OP;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001585 struct smb2_query_info_rsp *qi_rsp = NULL;
1586 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001587 void *buffer = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001588 int resp_buftype[3];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001589 struct cifs_open_parms oparms;
1590 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1591 struct cifs_fid fid;
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001592 unsigned int size[2];
1593 void *data[2];
Amir Goldstein0f060932020-02-03 21:46:43 +02001594 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001595
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001596 vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
1597 if (vars == NULL)
1598 return -ENOMEM;
1599 rqst = &vars->rqst[0];
1600 rsp_iov = &vars->rsp_iov[0];
1601
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001602 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001603
1604 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001605 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001606
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001607 if (qi.output_buffer_length > 1024) {
1608 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001609 return -EINVAL;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001610 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001611
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001612 if (!ses || !server) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001613 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001614 return -EIO;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001615 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001616
1617 if (smb3_encryption_required(tcon))
1618 flags |= CIFS_TRANSFORM_REQ;
1619
Markus Elfringcfaa1182019-11-05 21:30:25 +01001620 buffer = memdup_user(arg + sizeof(struct smb_query_info),
1621 qi.output_buffer_length);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001622 if (IS_ERR(buffer)) {
1623 kfree(vars);
Markus Elfringcfaa1182019-11-05 21:30:25 +01001624 return PTR_ERR(buffer);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001625 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001626
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001627 /* Open */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001628 rqst[0].rq_iov = &vars->open_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001629 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001630
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001631 memset(&oparms, 0, sizeof(oparms));
1632 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001633 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001634 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001635 oparms.fid = &fid;
1636 oparms.reconnect = false;
1637
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001638 if (qi.flags & PASSTHRU_FSCTL) {
1639 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1640 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1641 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001642 break;
1643 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1644 oparms.desired_access = GENERIC_ALL;
1645 break;
1646 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1647 oparms.desired_access = GENERIC_READ;
1648 break;
1649 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1650 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001651 break;
1652 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001653 } else if (qi.flags & PASSTHRU_SET_INFO) {
1654 oparms.desired_access = GENERIC_WRITE;
1655 } else {
1656 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001657 }
1658
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001659 rc = SMB2_open_init(tcon, server,
1660 &rqst[0], &oplock, &oparms, path);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001661 if (rc)
1662 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001663 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001664
1665 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001666 if (qi.flags & PASSTHRU_FSCTL) {
1667 /* Can eventually relax perm check since server enforces too */
1668 if (!capable(CAP_SYS_ADMIN))
1669 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001670 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001671 rqst[1].rq_iov = &vars->io_iov[0];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001672 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1673
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001674 rc = SMB2_ioctl_init(tcon, server,
1675 &rqst[1],
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001676 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001677 qi.info_type, true, buffer,
1678 qi.output_buffer_length,
Ronnie Sahlberg731b82b2020-01-08 13:08:07 +10001679 CIFSMaxBufSize -
1680 MAX_SMB2_CREATE_RESPONSE_SIZE -
1681 MAX_SMB2_CLOSE_RESPONSE_SIZE);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001682 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001683 } else if (qi.flags == PASSTHRU_SET_INFO) {
1684 /* Can eventually relax perm check since server enforces too */
1685 if (!capable(CAP_SYS_ADMIN))
1686 rc = -EPERM;
1687 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001688 rqst[1].rq_iov = &vars->si_iov[0];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001689 rqst[1].rq_nvec = 1;
1690
1691 size[0] = 8;
1692 data[0] = buffer;
1693
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001694 rc = SMB2_set_info_init(tcon, server,
1695 &rqst[1],
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001696 COMPOUND_FID, COMPOUND_FID,
1697 current->tgid,
1698 FILE_END_OF_FILE_INFORMATION,
1699 SMB2_O_INFO_FILE, 0, data, size);
1700 }
Steve French31ba4332019-03-13 02:40:07 -05001701 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001702 rqst[1].rq_iov = &vars->qi_iov[0];
Steve French31ba4332019-03-13 02:40:07 -05001703 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001704
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001705 rc = SMB2_query_info_init(tcon, server,
1706 &rqst[1], COMPOUND_FID,
Steve French31ba4332019-03-13 02:40:07 -05001707 COMPOUND_FID, qi.file_info_class,
1708 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001709 qi.input_buffer_length,
1710 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001711 } else { /* unknown flags */
Joe Perchesa0a30362020-04-14 22:42:53 -07001712 cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
1713 qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001714 rc = -EINVAL;
1715 }
1716
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001717 if (rc)
1718 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001719 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001720 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001721
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001722 /* Close */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001723 rqst[2].rq_iov = &vars->close_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001724 rqst[2].rq_nvec = 1;
1725
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001726 rc = SMB2_close_init(tcon, server,
1727 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001728 if (rc)
1729 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001730 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001731
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001732 rc = compound_send_recv(xid, ses, server,
1733 flags, 3, rqst,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001734 resp_buftype, rsp_iov);
1735 if (rc)
1736 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001737
1738 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001739 if (qi.flags & PASSTHRU_FSCTL) {
1740 pqi = (struct smb_query_info __user *)arg;
1741 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1742 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1743 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001744 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001745 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1746 > rsp_iov[1].iov_len)
1747 goto e_fault;
1748
1749 if (copy_to_user(&pqi->input_buffer_length,
1750 &qi.input_buffer_length,
1751 sizeof(qi.input_buffer_length)))
1752 goto e_fault;
1753
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001754 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1755 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001756 qi.input_buffer_length))
1757 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001758 } else {
1759 pqi = (struct smb_query_info __user *)arg;
1760 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1761 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1762 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001763 if (copy_to_user(&pqi->input_buffer_length,
1764 &qi.input_buffer_length,
1765 sizeof(qi.input_buffer_length)))
1766 goto e_fault;
1767
1768 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1769 qi.input_buffer_length))
1770 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001771 }
1772
1773 iqinf_exit:
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001774 kfree(vars);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001775 kfree(buffer);
1776 SMB2_open_free(&rqst[0]);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001777 if (qi.flags & PASSTHRU_FSCTL)
1778 SMB2_ioctl_free(&rqst[1]);
1779 else
1780 SMB2_query_info_free(&rqst[1]);
1781
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001782 SMB2_close_free(&rqst[2]);
1783 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1784 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1785 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001786 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001787
1788e_fault:
1789 rc = -EFAULT;
1790 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001791}
1792
Sachin Prabhu620d8742017-02-10 16:03:51 +05301793static ssize_t
Sachin Prabhu312bbc52017-04-04 02:12:04 -05001794smb2_copychunk_range(const unsigned int xid,
Steve French41c13582013-11-14 00:05:36 -06001795 struct cifsFileInfo *srcfile,
1796 struct cifsFileInfo *trgtfile, u64 src_off,
1797 u64 len, u64 dest_off)
1798{
1799 int rc;
1800 unsigned int ret_data_len;
1801 struct copychunk_ioctl *pcchunk;
Steve French9bf0c9c2013-11-16 18:05:28 -06001802 struct copychunk_ioctl_rsp *retbuf = NULL;
1803 struct cifs_tcon *tcon;
1804 int chunks_copied = 0;
1805 bool chunk_sizes_updated = false;
Sachin Prabhu620d8742017-02-10 16:03:51 +05301806 ssize_t bytes_written, total_bytes_written = 0;
Steve French41c13582013-11-14 00:05:36 -06001807
1808 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1809
1810 if (pcchunk == NULL)
1811 return -ENOMEM;
1812
Christoph Probsta205d502019-05-08 21:36:25 +02001813 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Steve French41c13582013-11-14 00:05:36 -06001814 /* Request a key from the server to identify the source of the copy */
1815 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1816 srcfile->fid.persistent_fid,
1817 srcfile->fid.volatile_fid, pcchunk);
1818
1819 /* Note: request_res_key sets res_key null only if rc !=0 */
1820 if (rc)
Steve French9bf0c9c2013-11-16 18:05:28 -06001821 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001822
1823 /* For now array only one chunk long, will make more flexible later */
Fabian Frederickbc09d142014-12-10 15:41:15 -08001824 pcchunk->ChunkCount = cpu_to_le32(1);
Steve French41c13582013-11-14 00:05:36 -06001825 pcchunk->Reserved = 0;
Steve French41c13582013-11-14 00:05:36 -06001826 pcchunk->Reserved2 = 0;
1827
Steve French9bf0c9c2013-11-16 18:05:28 -06001828 tcon = tlink_tcon(trgtfile->tlink);
1829
1830 while (len > 0) {
1831 pcchunk->SourceOffset = cpu_to_le64(src_off);
1832 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1833 pcchunk->Length =
1834 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1835
1836 /* Request server copy to target from src identified by key */
1837 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
Steve French41c13582013-11-14 00:05:36 -06001838 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01001839 true /* is_fsctl */, (char *)pcchunk,
Steve French153322f2019-03-28 22:32:49 -05001840 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1841 (char **)&retbuf, &ret_data_len);
Steve French9bf0c9c2013-11-16 18:05:28 -06001842 if (rc == 0) {
1843 if (ret_data_len !=
1844 sizeof(struct copychunk_ioctl_rsp)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001845 cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001846 rc = -EIO;
1847 goto cchunk_out;
1848 }
1849 if (retbuf->TotalBytesWritten == 0) {
1850 cifs_dbg(FYI, "no bytes copied\n");
1851 rc = -EIO;
1852 goto cchunk_out;
1853 }
1854 /*
1855 * Check if server claimed to write more than we asked
1856 */
1857 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1858 le32_to_cpu(pcchunk->Length)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001859 cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001860 rc = -EIO;
1861 goto cchunk_out;
1862 }
1863 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
Joe Perchesa0a30362020-04-14 22:42:53 -07001864 cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
Steve French9bf0c9c2013-11-16 18:05:28 -06001865 rc = -EIO;
1866 goto cchunk_out;
1867 }
1868 chunks_copied++;
Steve French41c13582013-11-14 00:05:36 -06001869
Sachin Prabhu620d8742017-02-10 16:03:51 +05301870 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1871 src_off += bytes_written;
1872 dest_off += bytes_written;
1873 len -= bytes_written;
1874 total_bytes_written += bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001875
Sachin Prabhu620d8742017-02-10 16:03:51 +05301876 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
Steve French9bf0c9c2013-11-16 18:05:28 -06001877 le32_to_cpu(retbuf->ChunksWritten),
1878 le32_to_cpu(retbuf->ChunkBytesWritten),
Sachin Prabhu620d8742017-02-10 16:03:51 +05301879 bytes_written);
Steve French9bf0c9c2013-11-16 18:05:28 -06001880 } else if (rc == -EINVAL) {
1881 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1882 goto cchunk_out;
Steve French41c13582013-11-14 00:05:36 -06001883
Steve French9bf0c9c2013-11-16 18:05:28 -06001884 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1885 le32_to_cpu(retbuf->ChunksWritten),
1886 le32_to_cpu(retbuf->ChunkBytesWritten),
1887 le32_to_cpu(retbuf->TotalBytesWritten));
1888
1889 /*
1890 * Check if this is the first request using these sizes,
1891 * (ie check if copy succeed once with original sizes
1892 * and check if the server gave us different sizes after
1893 * we already updated max sizes on previous request).
1894 * if not then why is the server returning an error now
1895 */
1896 if ((chunks_copied != 0) || chunk_sizes_updated)
1897 goto cchunk_out;
1898
1899 /* Check that server is not asking us to grow size */
1900 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1901 tcon->max_bytes_chunk)
1902 tcon->max_bytes_chunk =
1903 le32_to_cpu(retbuf->ChunkBytesWritten);
1904 else
1905 goto cchunk_out; /* server gave us bogus size */
1906
1907 /* No need to change MaxChunks since already set to 1 */
1908 chunk_sizes_updated = true;
Sachin Prabhu2477bc52015-02-04 13:10:26 +00001909 } else
1910 goto cchunk_out;
Steve French9bf0c9c2013-11-16 18:05:28 -06001911 }
1912
1913cchunk_out:
Steve French41c13582013-11-14 00:05:36 -06001914 kfree(pcchunk);
Steve French24df1482016-09-29 04:20:23 -05001915 kfree(retbuf);
Sachin Prabhu620d8742017-02-10 16:03:51 +05301916 if (rc)
1917 return rc;
1918 else
1919 return total_bytes_written;
Steve French41c13582013-11-14 00:05:36 -06001920}
1921
/* Flush any cached data for the open handle to stable storage on the server. */
static int
smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1928
/*
 * Return the offset of the data payload within an SMB2 read response.
 * No endian conversion here — presumably DataOffset is a single-byte
 * field in the wire format (see struct smb2_read_rsp); verify if changed.
 */
static unsigned int
smb2_read_data_offset(char *buf)
{
	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;

	return rsp->DataOffset;
}
1936
1937static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001938smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001939{
1940 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001941
1942 if (in_remaining)
1943 return le32_to_cpu(rsp->DataRemaining);
1944
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001945 return le32_to_cpu(rsp->DataLength);
1946}
1947
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001948
/*
 * Fill in the handle portion of the I/O parms and perform a synchronous
 * SMB2 read for the generic cifs read path.
 */
static int
smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
	       struct cifs_io_parms *parms, unsigned int *bytes_read,
	       char **buf, int *buf_type)
{
	parms->persistent_fid = pfid->persistent_fid;
	parms->volatile_fid = pfid->volatile_fid;
	return SMB2_read(xid, parms, bytes_read, buf, buf_type);
}
1958
/*
 * Fill in the handle portion of the I/O parms and perform a synchronous
 * SMB2 write of the supplied iovec array.
 */
static int
smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
		struct cifs_io_parms *parms, unsigned int *written,
		struct kvec *iov, unsigned long nr_segs)
{

	parms->persistent_fid = pfid->persistent_fid;
	parms->volatile_fid = pfid->volatile_fid;
	return SMB2_write(xid, parms, written, iov, nr_segs);
}
1969
/*
 * Set or clear the SPARSE_FILE attribute based on value passed in setsparse.
 * Returns true when the file already is (or now has been made) the requested
 * sparseness state, false when the FSCTL failed or the share is known not to
 * support it.
 */
static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
			   struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
{
	struct cifsInodeInfo *cifsi;
	int rc;

	cifsi = CIFS_I(inode);

	/* if file already sparse don't bother setting sparse again */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
		return true; /* already sparse */

	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
		return true; /* already not sparse */

	/*
	 * Can't check for sparse support on share the usual way via the
	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
	 * since Samba server doesn't set the flag on the share, yet
	 * supports the set sparse FSCTL and returns sparse correctly
	 * in the file attributes. If we fail setting sparse though we
	 * mark that server does not support sparse files for this share
	 * to avoid repeatedly sending the unsupported fsctl to server
	 * if the file is repeatedly extended.
	 */
	if (tcon->broken_sparse_sup)
		return false;

	/* single-byte payload: 1 = set sparse, 0 = clear sparse */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
			true /* is_fctl */,
			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
	if (rc) {
		tcon->broken_sparse_sup = true;
		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
		return false;
	}

	/* keep the cached DOS attributes in sync with the server */
	if (setsparse)
		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
	else
		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);

	return true;
}
2016
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002017static int
2018smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
2019 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
2020{
2021 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05002022 struct inode *inode;
2023
2024 /*
2025 * If extending file more than one page make sparse. Many Linux fs
2026 * make files sparse by default when extending via ftruncate
2027 */
David Howells2b0143b2015-03-17 22:25:59 +00002028 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05002029
2030 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05002031 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05002032
Steve Frenchd43cc792014-08-13 17:16:29 -05002033 /* whether set sparse succeeds or not, extend the file */
2034 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05002035 }
2036
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002037 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10002038 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002039}
2040
/*
 * Reflink-style clone: ask the server to duplicate @len bytes of extents
 * from @srcfile (at @src_off) into @trgtfile (at @dest_off) via
 * FSCTL_DUPLICATE_EXTENTS_TO_FILE.  Returns 0 or a negative errno.
 */
static int
smb2_duplicate_extents(const unsigned int xid,
		struct cifsFileInfo *srcfile,
		struct cifsFileInfo *trgtfile, u64 src_off,
		u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct inode *inode;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* servers advertise duplicate extent support with this flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
		FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);

	/* target must be at least as large as the cloned range */
	inode = d_inode(trgtfile->dentry);
	if (inode->i_size < dest_off + len) {
		rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
		if (rc)
			goto duplicate_extents_out;

		/*
		 * Although also could set plausible allocation size (i_blocks)
		 * here in addition to setting the file size, in reflink
		 * it is likely that the target file is sparse. Its allocation
		 * size will be queried on next revalidate, but it is important
		 * to make sure that file's cached size is updated immediately
		 */
		cifs_setsize(inode, dest_off + len);
	}
	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			true /* is_fsctl */,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	return rc;
}
Steve French02b16662015-06-27 21:18:36 -07002096
/* Enable transparent compression on the file behind the open handle. */
static int
smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
			    cfile->fid.volatile_fid);
}
2104
/*
 * Send FSCTL_SET_INTEGRITY_INFORMATION for the open file, keeping the
 * checksum algorithm unchanged (CHECKSUM_TYPE_UNCHANGED) and clearing
 * flags.  Returns the SMB2_ioctl() result.
 */
static int
smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct fsctl_set_integrity_information_req integr_info;
	unsigned int ret_data_len;

	integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
	integr_info.Flags = 0;
	integr_info.Reserved = 0;

	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SET_INTEGRITY_INFORMATION,
			true /* is_fsctl */,
			(char *)&integr_info,
			sizeof(struct fsctl_set_integrity_information_req),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

}
2126
Steve Frenche02789a2018-08-09 14:33:12 -05002127/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2128#define GMT_TOKEN_SIZE 50
2129
Steve French153322f2019-03-28 22:32:49 -05002130#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2131
Steve Frenche02789a2018-08-09 14:33:12 -05002132/*
2133 * Input buffer contains (empty) struct smb_snapshot array with size filled in
2134 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2135 */
Steve Frenchb3152e22015-06-24 03:17:02 -05002136static int
Steve French834170c2016-09-30 21:14:26 -05002137smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2138 struct cifsFileInfo *cfile, void __user *ioc_buf)
2139{
2140 char *retbuf = NULL;
2141 unsigned int ret_data_len = 0;
2142 int rc;
Steve French153322f2019-03-28 22:32:49 -05002143 u32 max_response_size;
Steve French834170c2016-09-30 21:14:26 -05002144 struct smb_snapshot_array snapshot_in;
2145
Steve French973189a2019-04-04 00:41:04 -05002146 /*
2147 * On the first query to enumerate the list of snapshots available
2148 * for this volume the buffer begins with 0 (number of snapshots
2149 * which can be returned is zero since at that point we do not know
2150 * how big the buffer needs to be). On the second query,
2151 * it (ret_data_len) is set to number of snapshots so we can
2152 * know to set the maximum response size larger (see below).
2153 */
Steve French153322f2019-03-28 22:32:49 -05002154 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
2155 return -EFAULT;
2156
2157 /*
2158 * Note that for snapshot queries that servers like Azure expect that
2159 * the first query be minimal size (and just used to get the number/size
2160 * of previous versions) so response size must be specified as EXACTLY
2161 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
2162 * of eight bytes.
2163 */
2164 if (ret_data_len == 0)
2165 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
2166 else
2167 max_response_size = CIFSMaxBufSize;
2168
Steve French834170c2016-09-30 21:14:26 -05002169 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2170 cfile->fid.volatile_fid,
2171 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002172 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002173 NULL, 0 /* no input data */, max_response_size,
Steve French834170c2016-09-30 21:14:26 -05002174 (char **)&retbuf,
2175 &ret_data_len);
2176 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
2177 rc, ret_data_len);
2178 if (rc)
2179 return rc;
2180
2181 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
2182 /* Fixup buffer */
2183 if (copy_from_user(&snapshot_in, ioc_buf,
2184 sizeof(struct smb_snapshot_array))) {
2185 rc = -EFAULT;
2186 kfree(retbuf);
2187 return rc;
2188 }
Steve French834170c2016-09-30 21:14:26 -05002189
Steve Frenche02789a2018-08-09 14:33:12 -05002190 /*
2191 * Check for min size, ie not large enough to fit even one GMT
2192 * token (snapshot). On the first ioctl some users may pass in
2193 * smaller size (or zero) to simply get the size of the array
2194 * so the user space caller can allocate sufficient memory
2195 * and retry the ioctl again with larger array size sufficient
2196 * to hold all of the snapshot GMT tokens on the second try.
2197 */
2198 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2199 ret_data_len = sizeof(struct smb_snapshot_array);
2200
2201 /*
2202 * We return struct SRV_SNAPSHOT_ARRAY, followed by
2203 * the snapshot array (of 50 byte GMT tokens) each
2204 * representing an available previous version of the data
2205 */
2206 if (ret_data_len > (snapshot_in.snapshot_array_size +
2207 sizeof(struct smb_snapshot_array)))
2208 ret_data_len = snapshot_in.snapshot_array_size +
2209 sizeof(struct smb_snapshot_array);
Steve French834170c2016-09-30 21:14:26 -05002210
2211 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2212 rc = -EFAULT;
2213 }
2214
2215 kfree(retbuf);
2216 return rc;
2217}
2218
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002219
2220
/*
 * Ioctl helper: open the object behind @pfile on the superblock's master
 * tcon, issue a single SMB2 CHANGE_NOTIFY with the watch_tree flag and
 * completion filter supplied by userspace in @ioc_buf (struct smb3_notify),
 * then close the handle again. Returns 0 or a negative errno.
 */
static int
smb3_notify(const unsigned int xid, struct file *pfile,
	    void __user *ioc_buf)
{
	struct smb3_notify notify;
	struct dentry *dentry = pfile->f_path.dentry;
	struct inode *inode = file_inode(pfile);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct cifs_tcon *tcon;
	const unsigned char *path;
	void *page = alloc_dentry_path();
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int rc = 0;

	path = build_path_from_dentry(dentry, page);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		goto notify_exit;
	}

	/* path + 1 skips the leading separator of the built path */
	utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
	if (utf16_path == NULL) {
		rc = -ENOMEM;
		goto notify_exit;
	}

	if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
		rc = -EFAULT;
		goto notify_exit;
	}

	/* open a fresh handle on the master tcon rather than reusing pfile's */
	tcon = cifs_sb_master_tcon(cifs_sb);
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
		       NULL);
	if (rc)
		goto notify_exit;

	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
				notify.watch_tree, notify.completion_filter);

	/* handle is closed regardless of the notify result */
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);

notify_exit:
	free_dentry_path(page);
	kfree(utf16_path);
	return rc;
}
2280
Steve French834170c2016-09-30 21:14:26 -05002281static int
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002282smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2283 const char *path, struct cifs_sb_info *cifs_sb,
2284 struct cifs_fid *fid, __u16 search_flags,
2285 struct cifs_search_info *srch_inf)
2286{
2287 __le16 *utf16_path;
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002288 struct smb_rqst rqst[2];
2289 struct kvec rsp_iov[2];
2290 int resp_buftype[2];
2291 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2292 struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
2293 int rc, flags = 0;
2294 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002295 struct cifs_open_parms oparms;
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002296 struct smb2_query_directory_rsp *qd_rsp = NULL;
2297 struct smb2_create_rsp *op_rsp = NULL;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002298 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002299
2300 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2301 if (!utf16_path)
2302 return -ENOMEM;
2303
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002304 if (smb3_encryption_required(tcon))
2305 flags |= CIFS_TRANSFORM_REQ;
2306
2307 memset(rqst, 0, sizeof(rqst));
2308 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
2309 memset(rsp_iov, 0, sizeof(rsp_iov));
2310
2311 /* Open */
2312 memset(&open_iov, 0, sizeof(open_iov));
2313 rqst[0].rq_iov = open_iov;
2314 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2315
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002316 oparms.tcon = tcon;
2317 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2318 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002319 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002320 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002321 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002322
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002323 rc = SMB2_open_init(tcon, server,
2324 &rqst[0], &oplock, &oparms, utf16_path);
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002325 if (rc)
2326 goto qdf_free;
2327 smb2_set_next_command(tcon, &rqst[0]);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002328
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002329 /* Query directory */
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002330 srch_inf->entries_in_buffer = 0;
Aurelien Aptel05957512018-05-17 16:35:07 +02002331 srch_inf->index_of_last_entry = 2;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002332
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002333 memset(&qd_iov, 0, sizeof(qd_iov));
2334 rqst[1].rq_iov = qd_iov;
2335 rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
2336
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002337 rc = SMB2_query_directory_init(xid, tcon, server,
2338 &rqst[1],
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002339 COMPOUND_FID, COMPOUND_FID,
2340 0, srch_inf->info_level);
2341 if (rc)
2342 goto qdf_free;
2343
2344 smb2_set_related(&rqst[1]);
2345
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002346 rc = compound_send_recv(xid, tcon->ses, server,
2347 flags, 2, rqst,
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002348 resp_buftype, rsp_iov);
2349
2350 /* If the open failed there is nothing to do */
2351 op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
2352 if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
2353 cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
2354 goto qdf_free;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002355 }
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002356 fid->persistent_fid = op_rsp->PersistentFileId;
2357 fid->volatile_fid = op_rsp->VolatileFileId;
2358
2359 /* Anything else than ENODATA means a genuine error */
2360 if (rc && rc != -ENODATA) {
2361 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2362 cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
2363 trace_smb3_query_dir_err(xid, fid->persistent_fid,
2364 tcon->tid, tcon->ses->Suid, 0, 0, rc);
2365 goto qdf_free;
2366 }
2367
Shyam Prasad N1be1fa42020-03-09 01:35:09 -07002368 atomic_inc(&tcon->num_remote_opens);
2369
Ronnie Sahlberg37478602020-01-08 13:08:06 +10002370 qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
2371 if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
2372 trace_smb3_query_dir_done(xid, fid->persistent_fid,
2373 tcon->tid, tcon->ses->Suid, 0, 0);
2374 srch_inf->endOfSearch = true;
2375 rc = 0;
2376 goto qdf_free;
2377 }
2378
2379 rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
2380 srch_inf);
2381 if (rc) {
2382 trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
2383 tcon->ses->Suid, 0, 0, rc);
2384 goto qdf_free;
2385 }
2386 resp_buftype[1] = CIFS_NO_BUFFER;
2387
2388 trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
2389 tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
2390
2391 qdf_free:
2392 kfree(utf16_path);
2393 SMB2_open_free(&rqst[0]);
2394 SMB2_query_directory_free(&rqst[1]);
2395 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2396 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002397 return rc;
2398}
2399
2400static int
2401smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2402 struct cifs_fid *fid, __u16 search_flags,
2403 struct cifs_search_info *srch_inf)
2404{
2405 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2406 fid->volatile_fid, 0, srch_inf);
2407}
2408
/* Close a directory search handle; same close as for any SMB2 handle. */
static int
smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
	       struct cifs_fid *fid)
{
	return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
2415
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002416/*
Christoph Probsta205d502019-05-08 21:36:25 +02002417 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2418 * the number of credits and return true. Otherwise - return false.
2419 */
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002420static bool
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002421smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002422{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002423 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Shyam Prasad N6d82c272021-02-03 23:20:46 -08002424 int scredits, in_flight;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002425
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002426 if (shdr->Status != STATUS_PENDING)
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002427 return false;
2428
Pavel Shilovsky66265f12019-01-23 17:11:16 -08002429 if (shdr->CreditRequest) {
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002430 spin_lock(&server->req_lock);
Pavel Shilovsky31473fc2016-10-24 15:33:04 -07002431 server->credits += le16_to_cpu(shdr->CreditRequest);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -08002432 scredits = server->credits;
Shyam Prasad N6d82c272021-02-03 23:20:46 -08002433 in_flight = server->in_flight;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002434 spin_unlock(&server->req_lock);
2435 wake_up(&server->request_q);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -08002436
2437 trace_smb3_add_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -08002438 server->conn_id, server->hostname, scredits,
2439 le16_to_cpu(shdr->CreditRequest), in_flight);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -08002440 cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
2441 __func__, le16_to_cpu(shdr->CreditRequest), scredits);
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07002442 }
2443
2444 return true;
2445}
2446
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002447static bool
2448smb2_is_session_expired(char *buf)
2449{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002450 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002451
Mark Symsd81243c2018-05-24 09:47:31 +01002452 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2453 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002454 return false;
2455
Steve Frenche68a9322018-07-30 14:23:58 -05002456 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2457 le16_to_cpu(shdr->Command),
2458 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002459 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002460
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002461 return true;
2462}
2463
Rohith Surabattula8e670f72020-09-18 05:37:28 +00002464static bool
2465smb2_is_status_io_timeout(char *buf)
2466{
2467 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
2468
2469 if (shdr->Status == STATUS_IO_TIMEOUT)
2470 return true;
2471 else
2472 return false;
2473}
2474
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002475static void
2476smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
2477{
2478 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
2479 struct list_head *tmp, *tmp1;
2480 struct cifs_ses *ses;
2481 struct cifs_tcon *tcon;
2482
Steve Frenchf1a08652021-02-20 18:52:15 -06002483 if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
2484 return;
2485
2486 spin_lock(&cifs_tcp_ses_lock);
2487 list_for_each(tmp, &server->smb_ses_list) {
2488 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
2489 list_for_each(tmp1, &ses->tcon_list) {
2490 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
2491 if (tcon->tid == shdr->TreeId) {
2492 tcon->need_reconnect = true;
2493 spin_unlock(&cifs_tcp_ses_lock);
2494 pr_warn_once("Server share %s deleted.\n",
2495 tcon->treeName);
2496 return;
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002497 }
2498 }
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002499 }
Steve Frenchf1a08652021-02-20 18:52:15 -06002500 spin_unlock(&cifs_tcp_ses_lock);
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002501}
2502
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002503static int
2504smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2505 struct cifsInodeInfo *cinode)
2506{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002507 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2508 return SMB2_lease_break(0, tcon, cinode->lease_key,
2509 smb2_get_lease_state(cinode));
2510
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002511 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2512 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002513 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002514}
2515
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002516void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002517smb2_set_related(struct smb_rqst *rqst)
2518{
2519 struct smb2_sync_hdr *shdr;
2520
2521 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002522 if (shdr == NULL) {
2523 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2524 return;
2525 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002526 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2527}
2528
/* zero bytes used by smb2_set_next_command() to 8-byte-align compounds */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2530
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002531void
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002532smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002533{
2534 struct smb2_sync_hdr *shdr;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002535 struct cifs_ses *ses = tcon->ses;
2536 struct TCP_Server_Info *server = ses->server;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002537 unsigned long len = smb_rqst_len(server, rqst);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002538 int i, num_padding;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002539
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002540 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
2541 if (shdr == NULL) {
2542 cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
2543 return;
2544 }
2545
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002546 /* SMB headers in a compound are 8 byte aligned. */
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002547
2548 /* No padding needed */
2549 if (!(len & 7))
2550 goto finished;
2551
2552 num_padding = 8 - (len & 7);
2553 if (!smb3_encryption_required(tcon)) {
2554 /*
2555 * If we do not have encryption then we can just add an extra
2556 * iov for the padding.
2557 */
2558 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2559 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2560 rqst->rq_nvec++;
2561 len += num_padding;
2562 } else {
2563 /*
2564 * We can not add a small padding iov for the encryption case
2565 * because the encryption framework can not handle the padding
2566 * iovs.
2567 * We have to flatten this into a single buffer and add
2568 * the padding to it.
2569 */
2570 for (i = 1; i < rqst->rq_nvec; i++) {
2571 memcpy(rqst->rq_iov[0].iov_base +
2572 rqst->rq_iov[0].iov_len,
2573 rqst->rq_iov[i].iov_base,
2574 rqst->rq_iov[i].iov_len);
2575 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
Ronnie Sahlberg271b9c02018-12-18 17:49:05 -06002576 }
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002577 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2578 0, num_padding);
2579 rqst->rq_iov[0].iov_len += num_padding;
2580 len += num_padding;
2581 rqst->rq_nvec = 1;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002582 }
2583
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002584 finished:
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002585 shdr->NextCommand = cpu_to_le32(len);
2586}
2587
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002588/*
2589 * Passes the query info response back to the caller on success.
2590 * Caller need to free this with free_rsp_buf().
2591 */
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002592int
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002593smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2594 __le16 *utf16_path, u32 desired_access,
2595 u32 class, u32 type, u32 output_len,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002596 struct kvec *rsp, int *buftype,
2597 struct cifs_sb_info *cifs_sb)
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002598{
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002599 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002600 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Paulo Alcantara04ad69c2021-03-08 12:00:50 -03002601 int flags = CIFS_CP_CREATE_CLOSE_OP;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002602 struct smb_rqst rqst[3];
2603 int resp_buftype[3];
2604 struct kvec rsp_iov[3];
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002605 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002606 struct kvec qi_iov[1];
2607 struct kvec close_iov[1];
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002608 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002609 struct cifs_open_parms oparms;
2610 struct cifs_fid fid;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002611 int rc;
2612
2613 if (smb3_encryption_required(tcon))
2614 flags |= CIFS_TRANSFORM_REQ;
2615
2616 memset(rqst, 0, sizeof(rqst));
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002617 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002618 memset(rsp_iov, 0, sizeof(rsp_iov));
2619
2620 memset(&open_iov, 0, sizeof(open_iov));
2621 rqst[0].rq_iov = open_iov;
Ronnie Sahlberg4d8dfaf2018-08-21 11:49:21 +10002622 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002623
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002624 oparms.tcon = tcon;
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002625 oparms.desired_access = desired_access;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002626 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002627 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002628 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04002629 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04002630
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002631 rc = SMB2_open_init(tcon, server,
2632 &rqst[0], &oplock, &oparms, utf16_path);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002633 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002634 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002635 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002636
2637 memset(&qi_iov, 0, sizeof(qi_iov));
2638 rqst[1].rq_iov = qi_iov;
2639 rqst[1].rq_nvec = 1;
2640
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002641 rc = SMB2_query_info_init(tcon, server,
2642 &rqst[1], COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002643 class, type, 0,
2644 output_len, 0,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05002645 NULL);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002646 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002647 goto qic_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10002648 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002649 smb2_set_related(&rqst[1]);
2650
2651 memset(&close_iov, 0, sizeof(close_iov));
2652 rqst[2].rq_iov = close_iov;
2653 rqst[2].rq_nvec = 1;
2654
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002655 rc = SMB2_close_init(tcon, server,
2656 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002657 if (rc)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002658 goto qic_exit;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002659 smb2_set_related(&rqst[2]);
2660
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002661 rc = compound_send_recv(xid, ses, server,
2662 flags, 3, rqst,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002663 resp_buftype, rsp_iov);
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002664 if (rc) {
2665 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Steve French7dcc82c2019-09-11 00:07:36 -05002666 if (rc == -EREMCHG) {
2667 tcon->need_reconnect = true;
Joe Perchesa0a30362020-04-14 22:42:53 -07002668 pr_warn_once("server share %s deleted\n",
2669 tcon->treeName);
Steve French7dcc82c2019-09-11 00:07:36 -05002670 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002671 goto qic_exit;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10002672 }
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002673 *rsp = rsp_iov[1];
2674 *buftype = resp_buftype[1];
2675
2676 qic_exit:
2677 SMB2_open_free(&rqst[0]);
2678 SMB2_query_info_free(&rqst[1]);
2679 SMB2_close_free(&rqst[2]);
2680 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2681 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2682 return rc;
2683}
2684
2685static int
2686smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002687 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002688{
2689 struct smb2_query_info_rsp *rsp;
2690 struct smb2_fs_full_size_info *info = NULL;
2691 __le16 utf16_path = 0; /* Null - open root of share */
2692 struct kvec rsp_iov = {NULL, 0};
2693 int buftype = CIFS_NO_BUFFER;
2694 int rc;
2695
2696
2697 rc = smb2_query_info_compound(xid, tcon, &utf16_path,
2698 FILE_READ_ATTRIBUTES,
2699 FS_FULL_SIZE_INFORMATION,
2700 SMB2_O_INFO_FILESYSTEM,
2701 sizeof(struct smb2_fs_full_size_info),
Steve French87f93d82020-02-04 13:02:59 -06002702 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002703 if (rc)
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002704 goto qfs_exit;
2705
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002706 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002707 buf->f_type = SMB2_MAGIC_NUMBER;
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002708 info = (struct smb2_fs_full_size_info *)(
2709 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2710 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2711 le32_to_cpu(rsp->OutputBufferLength),
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002712 &rsp_iov,
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002713 sizeof(struct smb2_fs_full_size_info));
2714 if (!rc)
2715 smb2_copy_fs_info_to_kstatfs(info, buf);
2716
2717qfs_exit:
Ronnie Sahlberg07d3b2e2018-12-20 22:03:04 -06002718 free_rsp_buf(buftype, rsp_iov.iov_base);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002719 return rc;
2720}
2721
Steve French2d304212018-06-24 23:28:12 -05002722static int
2723smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02002724 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
Steve French2d304212018-06-24 23:28:12 -05002725{
2726 int rc;
2727 __le16 srch_path = 0; /* Null - open root of share */
2728 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2729 struct cifs_open_parms oparms;
2730 struct cifs_fid fid;
2731
2732 if (!tcon->posix_extensions)
Amir Goldstein0f060932020-02-03 21:46:43 +02002733 return smb2_queryfs(xid, tcon, cifs_sb, buf);
Steve French2d304212018-06-24 23:28:12 -05002734
2735 oparms.tcon = tcon;
2736 oparms.desired_access = FILE_READ_ATTRIBUTES;
2737 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02002738 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French2d304212018-06-24 23:28:12 -05002739 oparms.fid = &fid;
2740 oparms.reconnect = false;
2741
Aurelien Aptel69dda302020-03-02 17:53:22 +01002742 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
2743 NULL, NULL);
Steve French2d304212018-06-24 23:28:12 -05002744 if (rc)
2745 return rc;
2746
2747 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2748 fid.volatile_fid, buf);
2749 buf->f_type = SMB2_MAGIC_NUMBER;
2750 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2751 return rc;
2752}
Steve French2d304212018-06-24 23:28:12 -05002753
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002754static bool
2755smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2756{
2757 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2758 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2759}
2760
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002761static int
2762smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2763 __u64 length, __u32 type, int lock, int unlock, bool wait)
2764{
2765 if (unlock && !lock)
2766 type = SMB2_LOCKFLAG_UNLOCK;
2767 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2768 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2769 current->tgid, length, offset, type, wait);
2770}
2771
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002772static void
2773smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2774{
2775 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2776}
2777
/* Store @fid's lease key into the inode's cifs-private info. */
static void
smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
{
	memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
}
2783
/* Generate a fresh random (uuid-sized) lease key for @fid. */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2789
Aurelien Aptel9d496402017-02-13 16:16:49 +01002790static int
2791smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2792 const char *search_name,
2793 struct dfs_info3_param **target_nodes,
2794 unsigned int *num_of_nodes,
2795 const struct nls_table *nls_codepage, int remap)
2796{
2797 int rc;
2798 __le16 *utf16_path = NULL;
2799 int utf16_path_len = 0;
2800 struct cifs_tcon *tcon;
2801 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2802 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2803 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2804
Christoph Probsta205d502019-05-08 21:36:25 +02002805 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002806
2807 /*
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002808 * Try to use the IPC tcon, otherwise just use any
Aurelien Aptel9d496402017-02-13 16:16:49 +01002809 */
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002810 tcon = ses->tcon_ipc;
2811 if (tcon == NULL) {
2812 spin_lock(&cifs_tcp_ses_lock);
2813 tcon = list_first_entry_or_null(&ses->tcon_list,
2814 struct cifs_tcon,
2815 tcon_list);
2816 if (tcon)
2817 tcon->tc_count++;
2818 spin_unlock(&cifs_tcp_ses_lock);
2819 }
Aurelien Aptel9d496402017-02-13 16:16:49 +01002820
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002821 if (tcon == NULL) {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002822 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2823 ses);
2824 rc = -ENOTCONN;
2825 goto out;
2826 }
2827
2828 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2829 &utf16_path_len,
2830 nls_codepage, remap);
2831 if (!utf16_path) {
2832 rc = -ENOMEM;
2833 goto out;
2834 }
2835
2836 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2837 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2838 if (!dfs_req) {
2839 rc = -ENOMEM;
2840 goto out;
2841 }
2842
2843 /* Highest DFS referral version understood */
2844 dfs_req->MaxReferralLevel = DFS_VERSION;
2845
2846 /* Path to resolve in an UTF-16 null-terminated string */
2847 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2848
2849 do {
Aurelien Aptel9d496402017-02-13 16:16:49 +01002850 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2851 FSCTL_DFS_GET_REFERRALS,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002852 true /* is_fsctl */,
Steve French153322f2019-03-28 22:32:49 -05002853 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
Aurelien Aptel9d496402017-02-13 16:16:49 +01002854 (char **)&dfs_rsp, &dfs_rsp_size);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002855 } while (rc == -EAGAIN);
2856
2857 if (rc) {
Steve French2564f2f2018-03-21 23:16:36 -05002858 if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002859 cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002860 goto out;
2861 }
2862
2863 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2864 num_of_nodes, target_nodes,
2865 nls_codepage, remap, search_name,
2866 true /* is_unicode */);
2867 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10002868 cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
Aurelien Aptel9d496402017-02-13 16:16:49 +01002869 goto out;
2870 }
2871
2872 out:
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002873 if (tcon && !tcon->ipc) {
2874 /* ipc tcons are not refcounted */
Aurelien Aptel9d496402017-02-13 16:16:49 +01002875 spin_lock(&cifs_tcp_ses_lock);
2876 tcon->tc_count--;
2877 spin_unlock(&cifs_tcp_ses_lock);
2878 }
2879 kfree(utf16_path);
2880 kfree(dfs_req);
2881 kfree(dfs_rsp);
2882 return rc;
2883}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002884
2885static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002886parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2887 u32 plen, char **target_path,
2888 struct cifs_sb_info *cifs_sb)
2889{
2890 unsigned int len;
2891
2892 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2893 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2894
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002895 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2896 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2897 le64_to_cpu(symlink_buf->InodeType));
2898 return -EOPNOTSUPP;
2899 }
2900
2901 *target_path = cifs_strndup_from_utf16(
2902 symlink_buf->PathBuffer,
2903 len, true, cifs_sb->local_nls);
2904 if (!(*target_path))
2905 return -ENOMEM;
2906
2907 convert_delimiter(*target_path, '/');
2908 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2909
2910 return 0;
2911}
2912
2913static int
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002914parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
2915 u32 plen, char **target_path,
2916 struct cifs_sb_info *cifs_sb)
2917{
2918 unsigned int sub_len;
2919 unsigned int sub_offset;
2920
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002921 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002922
2923 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
2924 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
2925 if (sub_offset + 20 > plen ||
2926 sub_offset + sub_len + 20 > plen) {
2927 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
2928 return -EIO;
2929 }
2930
2931 *target_path = cifs_strndup_from_utf16(
2932 symlink_buf->PathBuffer + sub_offset,
2933 sub_len, true, cifs_sb->local_nls);
2934 if (!(*target_path))
2935 return -ENOMEM;
2936
2937 convert_delimiter(*target_path, '/');
2938 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2939
2940 return 0;
2941}
2942
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002943static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002944parse_reparse_point(struct reparse_data_buffer *buf,
2945 u32 plen, char **target_path,
2946 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002947{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002948 if (plen < sizeof(struct reparse_data_buffer)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002949 cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
2950 plen);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002951 return -EIO;
2952 }
2953
2954 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2955 sizeof(struct reparse_data_buffer)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002956 cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
2957 plen);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002958 return -EIO;
2959 }
2960
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002961 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002962 switch (le32_to_cpu(buf->ReparseTag)) {
2963 case IO_REPARSE_TAG_NFS:
2964 return parse_reparse_posix(
2965 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002966 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002967 case IO_REPARSE_TAG_SYMLINK:
2968 return parse_reparse_symlink(
2969 (struct reparse_symlink_data_buffer *)buf,
2970 plen, target_path, cifs_sb);
2971 default:
Joe Perchesa0a30362020-04-14 22:42:53 -07002972 cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
2973 le32_to_cpu(buf->ReparseTag));
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002974 return -EOPNOTSUPP;
2975 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002976}
2977
/*
 * Minimum wire size of a symlink error response: the full error frame
 * minus the 1-byte ErrorData placeholder already counted inside
 * struct smb2_err_rsp, plus the symlink error payload.
 */
#define SMB2_SYMLINK_STRUCT_SIZE \
	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2980
/*
 * Obtain the target of a symlink at @full_path.
 *
 * Sends a compound open + FSCTL_GET_REPARSE_POINT ioctl + close.  When
 * @is_reparse_point and the open succeeded, the target is parsed from the
 * ioctl output via parse_reparse_point().  Otherwise the open is expected
 * to have failed with a symlink error response, from which the substitute
 * name is extracted after bounds-checking.  On success *@target_path is an
 * allocated string (caller frees).
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	/* cap the reply so open + ioctl + close responses fit in one buffer */
	rc = SMB2_ioctl_init(tcon, server,
			     &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close; COMPOUND_FID is resolved from the related open in the chain */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* remember the create error frame, if any, for symlink-error parsing */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->sync_hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* ioctl output must lie entirely within the response iov */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				      plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	err_buf = err_iov.iov_base;
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	/* substitute name must fit inside the received error frame */
	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* print name must also fit, even though only the substitute is used */
	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	*target_path = cifs_strndup_from_utf16(
			(char *)symlink->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

 querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
3169
/*
 * Query the reparse tag of @full_path.
 *
 * Sends a compound open (with OPEN_REPARSE_POINT) + FSCTL_GET_REPARSE_POINT
 * ioctl + close; on success *@tag is set to the ReparseTag from the ioctl
 * output.  Unlike smb2_query_symlink() the reparse payload itself is not
 * parsed here.
 */
int
smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
		       struct cifs_sb_info *cifs_sb, const char *full_path,
		       __u32 *tag)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/*
	 * setup smb2open - TODO add optimization to call cifs_get_readable_path
	 * to see if there is a handle already open that we can use
	 */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto query_rp_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL; COMPOUND_FID is resolved from the related open in the chain */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	/* cap the reply so open + ioctl + close responses fit in one buffer */
	rc = SMB2_ioctl_init(tcon, server,
			     &rqst[1], COMPOUND_FID,
			     COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto query_rp_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto query_rp_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);

	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if (rc == 0) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* ioctl output must lie entirely within the response iov */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
				      plen);
			rc = -EIO;
			goto query_rp_exit;
		}
		*tag = le32_to_cpu(reparse_buf->ReparseTag);
	}

 query_rp_exit:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
3296
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003297static struct cifs_ntsd *
3298get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003299 const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003300{
3301 struct cifs_ntsd *pntsd = NULL;
3302 unsigned int xid;
3303 int rc = -EOPNOTSUPP;
3304 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3305
3306 if (IS_ERR(tlink))
3307 return ERR_CAST(tlink);
3308
3309 xid = get_xid();
3310 cifs_dbg(FYI, "trying to get acl\n");
3311
3312 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003313 cifsfid->volatile_fid, (void **)&pntsd, pacllen,
3314 info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003315 free_xid(xid);
3316
3317 cifs_put_tlink(tlink);
3318
3319 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3320 if (rc)
3321 return ERR_PTR(rc);
3322 return pntsd;
3323
3324}
3325
3326static struct cifs_ntsd *
3327get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003328 const char *path, u32 *pacllen, u32 info)
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003329{
3330 struct cifs_ntsd *pntsd = NULL;
3331 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3332 unsigned int xid;
3333 int rc;
3334 struct cifs_tcon *tcon;
3335 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3336 struct cifs_fid fid;
3337 struct cifs_open_parms oparms;
3338 __le16 *utf16_path;
3339
3340 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
3341 if (IS_ERR(tlink))
3342 return ERR_CAST(tlink);
3343
3344 tcon = tlink_tcon(tlink);
3345 xid = get_xid();
3346
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003347 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05003348 if (!utf16_path) {
3349 rc = -ENOMEM;
3350 free_xid(xid);
3351 return ERR_PTR(rc);
3352 }
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003353
3354 oparms.tcon = tcon;
3355 oparms.desired_access = READ_CONTROL;
3356 oparms.disposition = FILE_OPEN;
Steve French3c3317d2020-10-21 13:12:08 -05003357 /*
3358 * When querying an ACL, even if the file is a symlink we want to open
3359 * the source not the target, and so the protocol requires that the
3360 * client specify this flag when opening a reparse point
3361 */
3362 oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003363 oparms.fid = &fid;
3364 oparms.reconnect = false;
3365
Boris Protopopov3970acf2020-12-18 11:30:12 -06003366 if (info & SACL_SECINFO)
3367 oparms.desired_access |= SYSTEM_SECURITY;
3368
Aurelien Aptel69dda302020-03-02 17:53:22 +01003369 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
3370 NULL);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003371 kfree(utf16_path);
3372 if (!rc) {
3373 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003374 fid.volatile_fid, (void **)&pntsd, pacllen,
3375 info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003376 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3377 }
3378
3379 cifs_put_tlink(tlink);
3380 free_xid(xid);
3381
3382 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3383 if (rc)
3384 return ERR_PTR(rc);
3385 return pntsd;
3386}
3387
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003388static int
3389set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
3390 struct inode *inode, const char *path, int aclflag)
3391{
3392 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3393 unsigned int xid;
3394 int rc, access_flags = 0;
3395 struct cifs_tcon *tcon;
3396 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3397 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3398 struct cifs_fid fid;
3399 struct cifs_open_parms oparms;
3400 __le16 *utf16_path;
3401
3402 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
3403 if (IS_ERR(tlink))
3404 return PTR_ERR(tlink);
3405
3406 tcon = tlink_tcon(tlink);
3407 xid = get_xid();
3408
Boris Protopopov3970acf2020-12-18 11:30:12 -06003409 if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
3410 access_flags |= WRITE_OWNER;
3411 if (aclflag & CIFS_ACL_SACL)
3412 access_flags |= SYSTEM_SECURITY;
3413 if (aclflag & CIFS_ACL_DACL)
3414 access_flags |= WRITE_DAC;
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003415
3416 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
Steve Frenchcfe89092018-05-19 02:04:55 -05003417 if (!utf16_path) {
3418 rc = -ENOMEM;
3419 free_xid(xid);
3420 return rc;
3421 }
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003422
3423 oparms.tcon = tcon;
3424 oparms.desired_access = access_flags;
Amir Goldstein0f060932020-02-03 21:46:43 +02003425 oparms.create_options = cifs_create_options(cifs_sb, 0);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003426 oparms.disposition = FILE_OPEN;
3427 oparms.path = path;
3428 oparms.fid = &fid;
3429 oparms.reconnect = false;
3430
Aurelien Aptel69dda302020-03-02 17:53:22 +01003431 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
3432 NULL, NULL);
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003433 kfree(utf16_path);
3434 if (!rc) {
3435 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3436 fid.volatile_fid, pnntsd, acllen, aclflag);
3437 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3438 }
3439
3440 cifs_put_tlink(tlink);
3441 free_xid(xid);
3442 return rc;
3443}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003444
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003445/* Retrieve an ACL from the server */
3446static struct cifs_ntsd *
3447get_smb2_acl(struct cifs_sb_info *cifs_sb,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003448 struct inode *inode, const char *path,
3449 u32 *pacllen, u32 info)
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003450{
3451 struct cifs_ntsd *pntsd = NULL;
3452 struct cifsFileInfo *open_file = NULL;
3453
Boris Protopopov9541b812020-12-17 20:58:08 +00003454 if (inode && !(info & SACL_SECINFO))
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003455 open_file = find_readable_file(CIFS_I(inode), true);
Boris Protopopov9541b812020-12-17 20:58:08 +00003456 if (!open_file || (info & SACL_SECINFO))
Boris Protopopov3970acf2020-12-18 11:30:12 -06003457 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003458
Boris Protopopov3970acf2020-12-18 11:30:12 -06003459 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003460 cifsFileInfo_put(open_file);
3461 return pntsd;
3462}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003463
/*
 * Zero the byte range [@offset, @offset + @len) of an open file with
 * FSCTL_SET_ZERO_DATA.  When @keep_size is false and the range extends
 * past EOF, the end-of-file is pushed out afterwards; extending is only
 * permitted while we hold a read (cache) oplock, since otherwise we
 * cannot tell whether the request grows the file.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);

	/*
	 * We zero the range through ioctl, so we need remove the page caches
	 * first, otherwise the data may be inconsistent with the server.
	 */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
3532
Steve French31742c52014-08-17 08:38:47 -05003533static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3534 loff_t offset, loff_t len)
3535{
3536 struct inode *inode;
Steve French31742c52014-08-17 08:38:47 -05003537 struct cifsFileInfo *cfile = file->private_data;
3538 struct file_zero_data_information fsctl_buf;
3539 long rc;
3540 unsigned int xid;
3541 __u8 set_sparse = 1;
3542
3543 xid = get_xid();
3544
David Howells2b0143b2015-03-17 22:25:59 +00003545 inode = d_inode(cfile->dentry);
Steve French31742c52014-08-17 08:38:47 -05003546
3547 /* Need to make file sparse, if not already, before freeing range. */
3548 /* Consider adding equivalent for compressed since it could also work */
Steve Frenchcfe89092018-05-19 02:04:55 -05003549 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
3550 rc = -EOPNOTSUPP;
3551 free_xid(xid);
3552 return rc;
3553 }
Steve French31742c52014-08-17 08:38:47 -05003554
Zhang Xiaoxuacc91c22020-06-23 07:31:53 -04003555 /*
3556 * We implement the punch hole through ioctl, so we need remove the page
3557 * caches first, otherwise the data may be inconsistent with the server.
3558 */
3559 truncate_pagecache_range(inode, offset, offset + len - 1);
3560
Christoph Probsta205d502019-05-08 21:36:25 +02003561 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
Steve French31742c52014-08-17 08:38:47 -05003562
3563 fsctl_buf.FileOffset = cpu_to_le64(offset);
3564 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3565
3566 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3567 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01003568 true /* is_fctl */, (char *)&fsctl_buf,
Steve French153322f2019-03-28 22:32:49 -05003569 sizeof(struct file_zero_data_information),
3570 CIFSMaxBufSize, NULL, NULL);
Steve French31742c52014-08-17 08:38:47 -05003571 free_xid(xid);
3572 return rc;
3573}
3574
/*
 * Pre-allocate @len bytes at @off for @file.
 *
 * SMB3 servers fully allocate non-sparse files, so for a non-sparse file a
 * non-extending falloc is a no-op, and an extending one only needs EOF to be
 * moved. For sparse files we can only honour the request by clearing the
 * sparse attribute, which is done solely when the range covers (almost) the
 * whole file. @keep_size mirrors FALLOC_FL_KEEP_SIZE.
 * Returns 0 on success or a negative error (-EOPNOTSUPP when unsupported).
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			    loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			/* rc is still -EOPNOTSUPP here */
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Extending the file
	 */
	if ((keep_size == false) && i_size_read(inode) < off + len) {
		/* honour RLIMIT_FSIZE and s_maxbytes before growing the file */
		rc = inode_newsize_ok(inode, off + len);
		if (rc)
			goto out;

		/* non-sparse files are fully allocated up to EOF by the server */
		if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
			smb2_set_sparse(xid, tcon, cfile, inode, false);

		eof = cpu_to_le64(off + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
		if (rc == 0) {
			/* keep the cached size in sync with the server EOF */
			cifsi->server_eof = off + len;
			cifs_setsize(inode, off + len);
			/* zero the tail of the old last page in the cache */
			cifs_truncate_page(inode->i_mapping, inode->i_size);
			truncate_setsize(inode, off + len);
		}
		goto out;
	}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and since we are not
	 * extending then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		rc = 0;
		goto out;
	}

	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	/* range covers (nearly) the whole file: drop the sparse attribute */
	smb2_set_sparse(xid, tcon, cfile, inode, false);
	rc = 0;

out:
	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
			 tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
			tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3663
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10003664static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
3665{
3666 struct cifsFileInfo *wrcfile, *cfile = file->private_data;
3667 struct cifsInodeInfo *cifsi;
3668 struct inode *inode;
3669 int rc = 0;
3670 struct file_allocated_range_buffer in_data, *out_data = NULL;
3671 u32 out_data_len;
3672 unsigned int xid;
3673
3674 if (whence != SEEK_HOLE && whence != SEEK_DATA)
3675 return generic_file_llseek(file, offset, whence);
3676
3677 inode = d_inode(cfile->dentry);
3678 cifsi = CIFS_I(inode);
3679
3680 if (offset < 0 || offset >= i_size_read(inode))
3681 return -ENXIO;
3682
3683 xid = get_xid();
3684 /*
3685 * We need to be sure that all dirty pages are written as they
3686 * might fill holes on the server.
3687 * Note that we also MUST flush any written pages since at least
3688 * some servers (Windows2016) will not reflect recent writes in
3689 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
3690 */
Aurelien Aptel86f740f2020-02-21 11:19:06 +01003691 wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10003692 if (wrcfile) {
3693 filemap_write_and_wait(inode->i_mapping);
3694 smb2_flush_file(xid, tcon, &wrcfile->fid);
3695 cifsFileInfo_put(wrcfile);
3696 }
3697
3698 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
3699 if (whence == SEEK_HOLE)
3700 offset = i_size_read(inode);
3701 goto lseek_exit;
3702 }
3703
3704 in_data.file_offset = cpu_to_le64(offset);
3705 in_data.length = cpu_to_le64(i_size_read(inode));
3706
3707 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3708 cfile->fid.volatile_fid,
3709 FSCTL_QUERY_ALLOCATED_RANGES, true,
3710 (char *)&in_data, sizeof(in_data),
3711 sizeof(struct file_allocated_range_buffer),
3712 (char **)&out_data, &out_data_len);
3713 if (rc == -E2BIG)
3714 rc = 0;
3715 if (rc)
3716 goto lseek_exit;
3717
3718 if (whence == SEEK_HOLE && out_data_len == 0)
3719 goto lseek_exit;
3720
3721 if (whence == SEEK_DATA && out_data_len == 0) {
3722 rc = -ENXIO;
3723 goto lseek_exit;
3724 }
3725
3726 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3727 rc = -EINVAL;
3728 goto lseek_exit;
3729 }
3730 if (whence == SEEK_DATA) {
3731 offset = le64_to_cpu(out_data->file_offset);
3732 goto lseek_exit;
3733 }
3734 if (offset < le64_to_cpu(out_data->file_offset))
3735 goto lseek_exit;
3736
3737 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
3738
3739 lseek_exit:
3740 free_xid(xid);
3741 kfree(out_data);
3742 if (!rc)
3743 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3744 else
3745 return rc;
3746}
3747
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003748static int smb3_fiemap(struct cifs_tcon *tcon,
3749 struct cifsFileInfo *cfile,
3750 struct fiemap_extent_info *fei, u64 start, u64 len)
3751{
3752 unsigned int xid;
3753 struct file_allocated_range_buffer in_data, *out_data;
3754 u32 out_data_len;
3755 int i, num, rc, flags, last_blob;
3756 u64 next;
3757
Christoph Hellwig45dd0522020-05-23 09:30:14 +02003758 rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
Christoph Hellwigcddf8a22020-05-23 09:30:13 +02003759 if (rc)
3760 return rc;
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003761
3762 xid = get_xid();
3763 again:
3764 in_data.file_offset = cpu_to_le64(start);
3765 in_data.length = cpu_to_le64(len);
3766
3767 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3768 cfile->fid.volatile_fid,
3769 FSCTL_QUERY_ALLOCATED_RANGES, true,
3770 (char *)&in_data, sizeof(in_data),
3771 1024 * sizeof(struct file_allocated_range_buffer),
3772 (char **)&out_data, &out_data_len);
3773 if (rc == -E2BIG) {
3774 last_blob = 0;
3775 rc = 0;
3776 } else
3777 last_blob = 1;
3778 if (rc)
3779 goto out;
3780
Murphy Zhou979a2662020-03-14 11:38:31 +08003781 if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10003782 rc = -EINVAL;
3783 goto out;
3784 }
3785 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3786 rc = -EINVAL;
3787 goto out;
3788 }
3789
3790 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3791 for (i = 0; i < num; i++) {
3792 flags = 0;
3793 if (i == num - 1 && last_blob)
3794 flags |= FIEMAP_EXTENT_LAST;
3795
3796 rc = fiemap_fill_next_extent(fei,
3797 le64_to_cpu(out_data[i].file_offset),
3798 le64_to_cpu(out_data[i].file_offset),
3799 le64_to_cpu(out_data[i].length),
3800 flags);
3801 if (rc < 0)
3802 goto out;
3803 if (rc == 1) {
3804 rc = 0;
3805 goto out;
3806 }
3807 }
3808
3809 if (!last_blob) {
3810 next = le64_to_cpu(out_data[num - 1].file_offset) +
3811 le64_to_cpu(out_data[num - 1].length);
3812 len = len - (next - start);
3813 start = next;
3814 goto again;
3815 }
3816
3817 out:
3818 free_xid(xid);
3819 kfree(out_data);
3820 return rc;
3821}
Steve French9ccf3212014-10-18 17:01:15 -05003822
Steve French31742c52014-08-17 08:38:47 -05003823static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3824 loff_t off, loff_t len)
3825{
3826 /* KEEP_SIZE already checked for by do_fallocate */
3827 if (mode & FALLOC_FL_PUNCH_HOLE)
3828 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003829 else if (mode & FALLOC_FL_ZERO_RANGE) {
3830 if (mode & FALLOC_FL_KEEP_SIZE)
3831 return smb3_zero_range(file, tcon, off, len, true);
3832 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003833 } else if (mode == FALLOC_FL_KEEP_SIZE)
3834 return smb3_simple_falloc(file, tcon, off, len, true);
3835 else if (mode == 0)
3836 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003837
3838 return -EOPNOTSUPP;
3839}
3840
/*
 * Handle an oplock break for the SMB2.0 dialect: there are no lease
 * epochs here, so just apply the new oplock level directly (the epoch
 * and purge_cache arguments are unused at this dialect).
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
3848
3849static void
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07003850smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3851 unsigned int epoch, bool *purge_cache);
3852
/*
 * Handle a lease break for SMB3: leases carry an epoch the server bumps
 * on every state change. Apply the new lease state only when the break's
 * epoch is newer than the cached one, then decide whether locally cached
 * data must be invalidated.
 */
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_state = cinode->oplock;
	unsigned int old_epoch = cinode->epoch;
	unsigned int new_state;

	/* NOTE(review): plain '>' assumes the epoch never wraps — confirm */
	if (epoch > old_epoch) {
		smb21_set_oplock_level(cinode, oplock, 0, NULL);
		cinode->epoch = epoch;
	}

	new_state = cinode->oplock;
	*purge_cache = false;

	/* losing read caching means anything cached may now be stale */
	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
	    (new_state & CIFS_CACHE_READ_FLG) == 0)
		*purge_cache = true;
	/* same state but more than one epoch apart: we missed a change */
	else if (old_state == new_state && (epoch - old_epoch > 1))
		*purge_cache = true;
}
3876
3877static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003878smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3879 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003880{
3881 oplock &= 0xFF;
3882 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3883 return;
3884 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003885 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003886 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3887 &cinode->vfs_inode);
3888 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003889 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003890 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3891 &cinode->vfs_inode);
3892 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3893 cinode->oplock = CIFS_CACHE_READ_FLG;
3894 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3895 &cinode->vfs_inode);
3896 } else
3897 cinode->oplock = 0;
3898}
3899
3900static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003901smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3902 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003903{
3904 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003905 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003906
3907 oplock &= 0xFF;
3908 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3909 return;
3910
Pavel Shilovskya016e272019-09-26 12:31:20 -07003911 /* Check if the server granted an oplock rather than a lease */
3912 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3913 return smb2_set_oplock_level(cinode, oplock, epoch,
3914 purge_cache);
3915
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003916 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003917 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003918 strcat(message, "R");
3919 }
3920 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003921 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003922 strcat(message, "H");
3923 }
3924 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003925 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003926 strcat(message, "W");
3927 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02003928 if (!new_oplock)
3929 strncpy(message, "None", sizeof(message));
3930
3931 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003932 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3933 &cinode->vfs_inode);
3934}
3935
/*
 * SMB3 variant of lease handling: apply the new lease state via the
 * SMB2.1 handler, then work out from the old state, the new state and
 * the epoch delta whether cached pages may be stale and must be purged.
 * A delta larger than the number of state changes we can account for
 * means we missed at least one change.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	if (purge_cache) {
		*purge_cache = false;
		/* old state: read caching only */
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		/* old state: read + handle caching */
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
3970
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003971static bool
3972smb2_is_read_op(__u32 oplock)
3973{
3974 return oplock == SMB2_OPLOCK_LEVEL_II;
3975}
3976
3977static bool
3978smb21_is_read_op(__u32 oplock)
3979{
3980 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3981 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3982}
3983
Pavel Shilovskyf0473902013-09-04 13:44:05 +04003984static __le32
3985map_oplock_to_lease(u8 oplock)
3986{
3987 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3988 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3989 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3990 return SMB2_LEASE_READ_CACHING;
3991 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3992 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3993 SMB2_LEASE_WRITE_CACHING;
3994 return 0;
3995}
3996
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04003997static char *
3998smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3999{
4000 struct create_lease *buf;
4001
4002 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
4003 if (!buf)
4004 return NULL;
4005
Stefano Brivio729c0c92018-07-05 15:10:02 +02004006 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004007 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004008
4009 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4010 (struct create_lease, lcontext));
4011 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
4012 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4013 (struct create_lease, Name));
4014 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07004015 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004016 buf->Name[0] = 'R';
4017 buf->Name[1] = 'q';
4018 buf->Name[2] = 'L';
4019 buf->Name[3] = 's';
4020 return (char *)buf;
4021}
4022
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004023static char *
4024smb3_create_lease_buf(u8 *lease_key, u8 oplock)
4025{
4026 struct create_lease_v2 *buf;
4027
4028 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
4029 if (!buf)
4030 return NULL;
4031
Stefano Brivio729c0c92018-07-05 15:10:02 +02004032 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004033 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4034
4035 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4036 (struct create_lease_v2, lcontext));
4037 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
4038 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4039 (struct create_lease_v2, Name));
4040 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07004041 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004042 buf->Name[0] = 'R';
4043 buf->Name[1] = 'q';
4044 buf->Name[2] = 'L';
4045 buf->Name[3] = 's';
4046 return (char *)buf;
4047}
4048
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004049static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06004050smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004051{
4052 struct create_lease *lc = (struct create_lease *)buf;
4053
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004054 *epoch = 0; /* not used */
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04004055 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
4056 return SMB2_OPLOCK_LEVEL_NOCHANGE;
4057 return le32_to_cpu(lc->lcontext.LeaseState);
4058}
4059
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004060static __u8
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06004061smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004062{
4063 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
4064
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004065 *epoch = le16_to_cpu(lc->lcontext.Epoch);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004066 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
4067 return SMB2_OPLOCK_LEVEL_NOCHANGE;
Ronnie Sahlberg96164ab2018-04-26 08:10:18 -06004068 if (lease_key)
Stefano Brivio729c0c92018-07-05 15:10:02 +02004069 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004070 return le32_to_cpu(lc->lcontext.LeaseState);
4071}
4072
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004073static unsigned int
4074smb2_wp_retry_size(struct inode *inode)
4075{
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10004076 return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04004077 SMB2_MAX_BUFFER_SIZE);
4078}
4079
Pavel Shilovsky52755802014-08-18 20:49:57 +04004080static bool
4081smb2_dir_needs_close(struct cifsFileInfo *cfile)
4082{
4083 return !cfile->invalidHandle;
4084}
4085
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004086static void
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004087fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
Steve French2b2f7542019-06-07 15:16:10 -05004088 struct smb_rqst *old_rq, __le16 cipher_type)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004089{
4090 struct smb2_sync_hdr *shdr =
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004091 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004092
4093 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
4094 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
4095 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
4096 tr_hdr->Flags = cpu_to_le16(0x01);
Steve French63ca5652020-10-15 23:41:40 -05004097 if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4098 (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
Steve Frenchfd08f2d2020-10-15 00:25:02 -05004099 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
Steve French2b2f7542019-06-07 15:16:10 -05004100 else
Steve Frenchfd08f2d2020-10-15 00:25:02 -05004101 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004102 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004103}
4104
/*
 * Cannot use sg_set_buf() here: callers sometimes pass objects living on
 * the stack, and with VMAP_STACK (at least) the stack is in the vmalloc
 * address space, so the page must be resolved accordingly.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	struct page *pg = is_vmalloc_addr(buf) ? vmalloc_to_page(buf)
					       : virt_to_page(buf);

	sg_set_page(sg, pg, buflen, offset_in_page(buf));
}
4121
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0] is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds one scatterlist spanning all iovs and pages of the compound
 * request, skipping the first 20 bytes of the transform header (they are
 * not part of the encrypted blob) and appending a final entry for the
 * signature buffer @sign. Returns NULL on allocation failure; the caller
 * frees the returned array with kfree().
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and page, plus one for the trailing signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* last entry: the signature / auth tag buffer */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
4169
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08004170static int
4171smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
4172{
4173 struct cifs_ses *ses;
4174 u8 *ses_enc_key;
4175
4176 spin_lock(&cifs_tcp_ses_lock);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02004177 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
4178 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
4179 if (ses->Suid == ses_id) {
4180 ses_enc_key = enc ? ses->smb3encryptionkey :
4181 ses->smb3decryptionkey;
Shyam Prasad N45a45462021-03-25 12:34:54 +00004182 memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02004183 spin_unlock(&cifs_tcp_ses_lock);
4184 return 0;
4185 }
4186 }
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08004187 }
4188 spin_unlock(&cifs_tcp_ses_lock);
4189
4190 return 1;
4191}
/*
 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
 * iov[0] - transform header (associate data),
 * iov[1-N] - SMB2 header and pages - data to encrypt.
 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
 * untouched.
 *
 * @enc selects direction (non-zero = encrypt). The AEAD transform and
 * key sizes follow the server's negotiated cipher_type. Returns 0 on
 * success or a negative error.
 */
static int
crypt_message(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int enc)
{
	struct smb2_transform_hdr *tr_hdr =
		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
	/* first 20 bytes of the transform header are excluded from the
	 * associated data (matches the 20-byte skip in init_sg()) */
	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
	int rc = 0;
	struct scatterlist *sg;
	u8 sign[SMB2_SIGNATURE_SIZE] = {};
	u8 key[SMB3_ENC_DEC_KEY_SIZE];
	struct aead_request *req;
	char *iv;
	unsigned int iv_len;
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);

	/* look up the per-session key by the session id in the header */
	rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
			 enc ? "en" : "de");
		return rc;
	}

	rc = smb3_crypto_aead_allocate(server);
	if (rc) {
		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
		return rc;
	}

	tfm = enc ? server->secmech.ccmaesencrypt :
						server->secmech.ccmaesdecrypt;

	/* 256-bit ciphers take the larger key; 128-bit ones the smaller */
	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
	else
		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);

	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
		return rc;
	}

	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
		return rc;
	}

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
		return -ENOMEM;
	}

	/* when decrypting, the received signature is the auth tag to verify */
	if (!enc) {
		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
		crypt_len += SMB2_SIGNATURE_SIZE;
	}

	sg = init_sg(num_rqst, rqst, sign);
	if (!sg) {
		cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
		rc = -ENOMEM;
		goto free_req;
	}

	iv_len = crypto_aead_ivsize(tfm);
	iv = kzalloc(iv_len, GFP_KERNEL);
	if (!iv) {
		cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
		rc = -ENOMEM;
		goto free_sg;
	}

	/* GCM uses the nonce as-is; CCM prepends a leading byte of 3
	 * (presumably the CCM flags/format byte — see RFC 3610) */
	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
	else {
		iv[0] = 3;
		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
	}

	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
	aead_request_set_ad(req, assoc_data_len);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* synchronous wait for the (possibly async) AEAD operation */
	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
				: crypto_aead_decrypt(req), &wait);

	/* on encrypt, publish the computed auth tag as the signature */
	if (!rc && enc)
		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);

	kfree(iv);
free_sg:
	kfree(sg);
free_req:
	kfree(req);
	return rc;
}
4303
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004304void
4305smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004306{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004307 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004308
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004309 for (i = 0; i < num_rqst; i++) {
4310 if (rqst[i].rq_pages) {
4311 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
4312 put_page(rqst[i].rq_pages[j]);
4313 kfree(rqst[i].rq_pages);
4314 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004315 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004316}
4317
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004318/*
4319 * This function will initialize new_rq and encrypt the content.
4320 * The first entry, new_rq[0], only contains a single iov which contains
4321 * a smb2_transform_hdr and is pre-allocated by the caller.
4322 * This function then populates new_rq[1+] with the content from olq_rq[0+].
4323 *
4324 * The end result is an array of smb_rqst structures where the first structure
4325 * only contains a single iov for the transform header which we then can pass
4326 * to crypt_message().
4327 *
4328 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
4329 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
4330 */
4331static int
4332smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
4333 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004334{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004335 struct page **pages;
4336 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
4337 unsigned int npages;
4338 unsigned int orig_len = 0;
4339 int i, j;
4340 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004341
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004342 for (i = 1; i < num_rqst; i++) {
4343 npages = old_rq[i - 1].rq_npages;
4344 pages = kmalloc_array(npages, sizeof(struct page *),
4345 GFP_KERNEL);
4346 if (!pages)
4347 goto err_free;
4348
4349 new_rq[i].rq_pages = pages;
4350 new_rq[i].rq_npages = npages;
4351 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
4352 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
4353 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
4354 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
4355 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
4356
4357 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
4358
4359 for (j = 0; j < npages; j++) {
4360 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4361 if (!pages[j])
4362 goto err_free;
4363 }
4364
4365 /* copy pages form the old */
4366 for (j = 0; j < npages; j++) {
4367 char *dst, *src;
4368 unsigned int offset, len;
4369
4370 rqst_page_get_length(&new_rq[i], j, &len, &offset);
4371
4372 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
4373 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
4374
4375 memcpy(dst, src, len);
4376 kunmap(new_rq[i].rq_pages[j]);
4377 kunmap(old_rq[i - 1].rq_pages[j]);
4378 }
4379 }
4380
4381 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05004382 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004383
4384 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02004385 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004386 if (rc)
4387 goto err_free;
4388
4389 return rc;
4390
4391err_free:
4392 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
4393 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004394}
4395
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004396static int
4397smb3_is_transform_hdr(void *buf)
4398{
4399 struct smb2_transform_hdr *trhdr = buf;
4400
4401 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4402}
4403
4404static int
4405decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4406 unsigned int buf_data_size, struct page **pages,
Rohith Surabattula62593012020-10-08 09:58:41 +00004407 unsigned int npages, unsigned int page_data_size,
4408 bool is_offloaded)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004409{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004410 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004411 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004412 int rc;
4413
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004414 iov[0].iov_base = buf;
4415 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4416 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4417 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004418
4419 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004420 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004421 rqst.rq_pages = pages;
4422 rqst.rq_npages = npages;
4423 rqst.rq_pagesz = PAGE_SIZE;
4424 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
4425
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004426 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02004427 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004428
4429 if (rc)
4430 return rc;
4431
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004432 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004433
Rohith Surabattula62593012020-10-08 09:58:41 +00004434 if (!is_offloaded)
4435 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004436
4437 return rc;
4438}
4439
4440static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004441read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
4442 unsigned int npages, unsigned int len)
4443{
4444 int i;
4445 int length;
4446
4447 for (i = 0; i < npages; i++) {
4448 struct page *page = pages[i];
4449 size_t n;
4450
4451 n = len;
4452 if (len >= PAGE_SIZE) {
4453 /* enough data to fill the page */
4454 n = PAGE_SIZE;
4455 len -= n;
4456 } else {
4457 zero_user(page, len, PAGE_SIZE - len);
4458 len = 0;
4459 }
Long Li1dbe3462018-05-30 12:47:55 -07004460 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004461 if (length < 0)
4462 return length;
4463 server->total_read += length;
4464 }
4465
4466 return 0;
4467}
4468
4469static int
4470init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
4471 unsigned int cur_off, struct bio_vec **page_vec)
4472{
4473 struct bio_vec *bvec;
4474 int i;
4475
4476 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
4477 if (!bvec)
4478 return -ENOMEM;
4479
4480 for (i = 0; i < npages; i++) {
4481 bvec[i].bv_page = pages[i];
4482 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
4483 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
4484 data_size -= bvec[i].bv_len;
4485 }
4486
4487 if (data_size != 0) {
4488 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4489 kfree(bvec);
4490 return -EIO;
4491 }
4492
4493 *page_vec = bvec;
4494 return 0;
4495}
4496
/*
 * Handle a (possibly already-decrypted) SMB2 READ response for an async
 * read.  The response header lives in @buf (@buf_len bytes); for large
 * reads the payload lives in @pages (@page_data_size bytes over @npages
 * pages) instead.  Copies the payload into the caller's readdata via
 * rdata->copy_into_pages() and marks the mid done.
 *
 * When @is_offloaded the mid is not on the pending queue, so instead of
 * dequeue_mid() we only set mid->mid_state (the offload worker owns the
 * rest of the completion); we also must not call cifs_reconnect() from
 * the worker context.
 *
 * Returns the number of bytes handed to copy_into_pages(), 0 for a
 * response that was fully dispositioned (including server errors, with
 * the verdict left in rdata->result), or a negative error.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size,
		 bool is_offloaded)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		/* reconnect is unsafe from the offload worker's context */
		if (!is_offloaded)
			cifs_reconnect(server);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_RECEIVED;
		else
			dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* padding between the response header and the start of the data */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	if (is_offloaded)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		dequeue_mid(mid, false);
	return length;
}
4652
/*
 * Context for offloading decryption of a large encrypted read response
 * to a worker thread (see smb2_decrypt_offload()).  Built and queued on
 * decrypt_wq by receive_encrypted_read(); freed by the worker.
 */
struct smb2_decrypt_work {
	struct work_struct decrypt;	/* work item queued on decrypt_wq */
	struct TCP_Server_Info *server;	/* connection the PDU arrived on */
	struct page **ppages;		/* pages holding the encrypted payload */
	char *buf;			/* buffer holding the transform header */
	unsigned int npages;		/* number of entries in ppages */
	unsigned int len;		/* payload bytes carried in ppages */
};
4661
4662
/*
 * Worker: decrypt a large encrypted read response off the demultiplex
 * thread.  On success it looks up and dequeues the mid, hands the
 * decrypted payload to handle_read_data() and invokes the mid callback.
 * Always releases the pages, the buffer and the work context.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len, true);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	/* record that the server is alive: we just got a full PDU from it */
	dw->server->lstrp = jiffies;
	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len,
				      true);
		if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
			mid->when_received = jiffies;
#endif
			if (dw->server->ops->is_network_name_deleted)
				dw->server->ops->is_network_name_deleted(dw->buf,
									 dw->server);

			mid->callback(mid);
		} else {
			/*
			 * handle_read_data() failed.  If we are
			 * reconnecting, complete the mid as a retry;
			 * otherwise put it back on the pending queue so
			 * the normal receive path can deal with it.
			 */
			spin_lock(&GlobalMid_Lock);
			if (dw->server->tcpStatus == CifsNeedReconnect) {
				mid->mid_state = MID_RETRY_NEEDED;
				spin_unlock(&GlobalMid_Lock);
				mid->callback(mid);
			} else {
				mid->mid_state = MID_REQUEST_SUBMITTED;
				mid->mid_flags &= ~(MID_DELETED);
				list_add_tail(&mid->qhead,
					      &dw->server->pending_mid_q);
				spin_unlock(&GlobalMid_Lock);
			}
		}
		/* drop the reference taken by smb2_find_dequeue_mid() */
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4721
4722
/*
 * Receive an encrypted PDU that is large enough to be a READ response.
 * Reads the header into the small buffer and the payload into freshly
 * allocated pages, then either queues decryption on decrypt_wq (large
 * reads on a busy connection; returns -1 with *num_mids == 0 and the
 * worker completes the mid) or decrypts inline and dispatches the
 * single mid via handle_read_data().
 */
static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
		       int *num_mids)
{
	char *buf = server->smallbuf;
	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
	unsigned int npages;
	struct page **pages;
	unsigned int len;
	unsigned int buflen = server->pdu_size;
	int rc;
	int i = 0;
	struct smb2_decrypt_work *dw;

	*num_mids = 1;
	/* finish reading the transform header + read response header */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;

	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
	if (rc < 0)
		return rc;
	server->total_read += rc;

	/* everything past the response header goes into pages */
	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
		server->vals->read_rsp_size;
	npages = DIV_ROUND_UP(len, PAGE_SIZE);

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto discard_data;
	}

	for (; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			rc = -ENOMEM;
			goto discard_data;
		}
	}

	/* read the payload data into the pages */
	rc = read_data_into_pages(server, pages, npages, len);
	if (rc)
		goto free_pages;

	rc = cifs_discard_remaining_data(server);
	if (rc)
		goto free_pages;

	/*
	 * For large reads, offload to different thread for better performance,
	 * use more cores decrypting which can be expensive
	 */

	if ((server->min_offload) && (server->in_flight > 1) &&
	    (server->pdu_size >= server->min_offload)) {
		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
		if (dw == NULL)
			goto non_offloaded_decrypt;

		/* the worker takes ownership of smallbuf; give server a new one */
		dw->buf = server->smallbuf;
		server->smallbuf = (char *)cifs_small_buf_get();

		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);

		dw->npages = npages;
		dw->server = server;
		dw->ppages = pages;
		dw->len = len;
		queue_work(decrypt_wq, &dw->decrypt);
		*num_mids = 0; /* worker thread takes care of finding mid */
		return -1;
	}

non_offloaded_decrypt:
	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
			      pages, npages, len, false);
	if (rc)
		goto free_pages;

	*mid = smb2_find_mid(server, buf);
	if (*mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		(*mid)->decrypted = true;
		rc = handle_read_data(server, *mid, buf,
				      server->vals->read_rsp_size,
				      pages, npages, len, false);
		if (rc >= 0) {
			if (server->ops->is_network_name_deleted) {
				server->ops->is_network_name_deleted(buf,
								     server);
			}
		}
	}

free_pages:
	/* i is the number of pages successfully allocated above */
	for (i = i - 1; i >= 0; i--)
		put_page(pages[i]);
	kfree(pages);
	return rc;
discard_data:
	cifs_discard_remaining_data(server);
	goto free_pages;
}
4830
4831static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004832receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004833 struct mid_q_entry **mids, char **bufs,
4834 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004835{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004836 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004837 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004838 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004839 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004840 unsigned int buf_size;
4841 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004842 int next_is_large;
4843 char *next_buffer = NULL;
4844
4845 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004846
4847 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004848 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004849 server->large_buf = true;
4850 memcpy(server->bigbuf, buf, server->total_read);
4851 buf = server->bigbuf;
4852 }
4853
4854 /* now read the rest */
4855 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004856 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004857 if (length < 0)
4858 return length;
4859 server->total_read += length;
4860
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004861 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Rohith Surabattula62593012020-10-08 09:58:41 +00004862 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004863 if (length)
4864 return length;
4865
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004866 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004867one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004868 shdr = (struct smb2_sync_hdr *)buf;
4869 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004870 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004871 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004872 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004873 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004874 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004875 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004876 pdu_length - le32_to_cpu(shdr->NextCommand));
4877 }
4878
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004879 mid_entry = smb2_find_mid(server, buf);
4880 if (mid_entry == NULL)
4881 cifs_dbg(FYI, "mid not found\n");
4882 else {
4883 cifs_dbg(FYI, "mid found\n");
4884 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004885 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004886 }
4887
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004888 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004889 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004890 return -1;
4891 }
4892 bufs[*num_mids] = buf;
4893 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004894
4895 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004896 ret = mid_entry->handle(server, mid_entry);
4897 else
4898 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004899
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004900 if (ret == 0 && shdr->NextCommand) {
4901 pdu_length -= le32_to_cpu(shdr->NextCommand);
4902 server->large_buf = next_is_large;
4903 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004904 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004905 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004906 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004907 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004908 } else if (ret != 0) {
4909 /*
4910 * ret != 0 here means that we didn't get to handle_mid() thus
4911 * server->smallbuf and server->bigbuf are still valid. We need
4912 * to free next_buffer because it is not going to be used
4913 * anywhere.
4914 */
4915 if (next_is_large)
4916 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4917 else
4918 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004919 }
4920
4921 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004922}
4923
4924static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004925smb3_receive_transform(struct TCP_Server_Info *server,
4926 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004927{
4928 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004929 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004930 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4931 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4932
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004933 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004934 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004935 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004936 pdu_length);
4937 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004938 return -ECONNABORTED;
4939 }
4940
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004941 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004942 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004943 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004944 return -ECONNABORTED;
4945 }
4946
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004947 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004948 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05004949 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08004950 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004951
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004952 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004953}
4954
4955int
4956smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4957{
4958 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4959
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004960 return handle_read_data(server, mid, buf, server->pdu_size,
Rohith Surabattulade9ac0a2020-10-28 13:42:21 +00004961 NULL, 0, 0, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004962}
4963
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10004964static int
4965smb2_next_header(char *buf)
4966{
4967 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4968 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4969
4970 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4971 return sizeof(struct smb2_transform_hdr) +
4972 le32_to_cpu(t_hdr->OriginalMessageSize);
4973
4974 return le32_to_cpu(hdr->NextCommand);
4975}
4976
/*
 * Create a special file (block or char device node) using the SFU
 * (Services for Unix) emulation: create a regular file on the server
 * and write a win_dev blob ("IntxCHR"/"IntxBLK" plus major/minor) into
 * it.  Only active when the share is mounted with the 'sfu' option and
 * only for S_ISCHR/S_ISBLK modes; everything else returns -EPERM.
 */
static int
smb2_make_node(unsigned int xid, struct inode *inode,
	       struct dentry *dentry, struct cifs_tcon *tcon,
	       const char *full_path, umode_t mode, dev_t dev)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	int rc = -EPERM;
	FILE_ALL_INFO *buf = NULL;
	struct cifs_io_parms io_parms = {0};
	__u32 oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	unsigned int bytes_written;
	struct win_dev *pdev;
	struct kvec iov[2];

	/*
	 * Check if mounted with mount parm 'sfu' mount parm.
	 * SFU emulation should work with all servers, but only
	 * supports block and char device (no socket & fifo),
	 * and was used by default in earlier versions of Windows
	 */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
		goto out;

	/*
	 * TODO: Add ability to create instead via reparse point. Windows (e.g.
	 * their current NFS server) uses this approach to expose special files
	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
	 */

	if (!S_ISCHR(mode) && !S_ISBLK(mode))
		goto out;

	cifs_dbg(FYI, "sfu compat create special file\n");

	/* buf doubles as the open-response info and the win_dev payload */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
						    CREATE_OPTION_SPECIAL);
	oparms.disposition = FILE_CREATE;
	oparms.path = full_path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;
	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
	if (rc)
		goto out;

	/*
	 * BB Do not bother to decode buf since no local inode yet to put
	 * timestamps in, but we can reuse it safely.
	 */

	pdev = (struct win_dev *)buf;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = sizeof(struct win_dev);
	iov[1].iov_base = buf;
	iov[1].iov_len = sizeof(struct win_dev);
	if (S_ISCHR(mode)) {
		memcpy(pdev->type, "IntxCHR", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	} else if (S_ISBLK(mode)) {
		memcpy(pdev->type, "IntxBLK", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	}
	tcon->ses->server->ops->close(xid, tcon, &fid);
	/* force a fresh lookup so the new node is re-read from the server */
	d_drop(dentry);

	/* FIXME: add code here to set EAs */
out:
	kfree(buf);
	return rc;
}
5070
5071
/*
 * Dispatch table binding the generic cifs client code to the SMB 2.0
 * dialect.  Member order mirrors the smb21/smb30/smb311 tables below;
 * diffing against smb21_operations shows what 2.0 lacks: it uses
 * cifs_wait_mtu_credits rather than smb2_wait_mtu_credits and has no
 * .adjust_credits hook (presumably because 2.0 predates multi-credit
 * requests — confirm against MS-SMB2), and it carries none of the
 * snapshot/notify/encryption entries of the SMB3 tables.
 */
struct smb_version_operations smb20_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = cifs_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb2_is_read_op,
	.set_oplock_level = smb2_set_oplock_level,
	/* SMB 2.0 uses the original (v1) lease buffer format */
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
};
5170
/*
 * Dispatch table for the SMB 2.1 dialect.  Relative to smb20_operations
 * it switches to smb2_wait_mtu_credits and adds .adjust_credits
 * (multi-credit/large-MTU accounting), uses the 2.1-specific
 * smb21_is_read_op/smb21_set_oplock_level, and gains .enum_snapshots
 * and .notify.  Lease buffers are still the v1 format.
 */
struct smb_version_operations smb21_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb21_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
};
5272
/*
 * Dispatch table for the SMB 3.0/3.0.2 dialects.  Relative to
 * smb21_operations it adds the SMB3-only features visible below:
 * share-capability dumping, smb3 signing (generate_smb30signingkey +
 * smb3_calc_signature), encryption transforms (init_transform_rq /
 * is_transform_hdr / receive_transform), v2 lease buffers,
 * duplicate_extents, fallocate, close_getattr, integrity setting,
 * negotiate validation, and WSL reparse-tag queries.
 */
struct smb_version_operations smb30_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	/* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
	.query_reparse_tag = smb2_query_reparse_tag,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb30signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	/* SMB3 uses the v2 lease buffer format */
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
};
5386
/*
 * Dispatch table for the SMB 3.1.1 dialect.  Differs from
 * smb30_operations in the 3.1.1-specific entries: .posix_mkdir,
 * .queryfs = smb311_queryfs, signing keys from
 * generate_smb311signingkey, and .validate_negotiate intentionally
 * left unset (see the inline comment — not used in 3.11, where
 * negotiate validation is handled differently).
 */
struct smb_version_operations smb311_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.query_reparse_tag = smb2_query_reparse_tag,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	/* POSIX extensions mkdir is a 3.1.1-only capability */
	.posix_mkdir = smb311_posix_mkdir,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb311_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb311signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
};
Steve Frenchaab18932015-06-23 23:37:11 -05005500
/*
 * Protocol constants for the SMB 2.0 dialect.  Capabilities must be
 * zero on the wire for 2.0 (MBZ), and the v1 create_lease context
 * size is advertised.
 */
struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/* -1: presumably drops the 1-byte data placeholder at the end of
	 * smb2_read_rsp — confirm against the struct in smb2pdu.h */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
5521
/*
 * Protocol constants for the SMB 2.1 dialect.  Identical layout to
 * smb20_values apart from the version string and protocol id; lease
 * contexts are still the v1 create_lease size.
 */
struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
5542
/*
 * Protocol constants used when the user asked for "any SMB3 dialect"
 * (vers=3): a multi-dialect negotiate is sent, so the protocol_id here
 * is a placeholder (see inline comment).  Requests the full SMB3
 * capability set and advertises v2 lease contexts.
 */
struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
5563
/*
 * Protocol constants for the default mount (no explicit vers=): a
 * multi-dialect negotiate is sent, so protocol_id is a placeholder.
 * Field-for-field identical to smb3any_values except the version
 * string.
 */
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
5584
/*
 * Protocol constants for the SMB 3.0 dialect (vers=3.0): single-dialect
 * negotiate with the full SMB3 capability set and v2 lease contexts.
 */
struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French20b6d8b2013-06-12 22:48:41 -05005605
/*
 * Protocol constants for the SMB 3.0.2 dialect (vers=3.02): identical
 * to smb30_values except the version string and protocol id.
 */
struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French5f7fbf72014-12-17 22:52:58 -06005626
/*
 * Dialect-specific protocol constants for SMB 3.1.1.  Identical to
 * smb30_values except for the version string and protocol id; keep the
 * field order and shared entries in sync with the other
 * smb_version_values tables in this file.
 */
struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	/*
	 * SMB2_GLOBAL_CAP_* bits requested for this dialect -- presumably
	 * advertised during protocol negotiation; confirm against the
	 * negotiate code before relying on this.
	 */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,	/* no distinct "large lock" flag for this dialect */
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	.header_preamble_size = 0,	/* NOTE(review): no preamble bytes counted for SMB2+ -- confirm */
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/*
	 * NOTE(review): the -1 likely excludes a 1-byte variable-data
	 * placeholder at the end of struct smb2_read_rsp -- confirm against
	 * the struct definition in smb2pdu.h.
	 */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,		/* Unix-extensions capability not set for SMB2/3 dialects */
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	/* signing treated as usable whether the peer marks it enabled or required */
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),	/* v2 lease create context */
};