blob: 21ef51d338e0ca333e21a81610ab0cec1d6567b6 [file] [log] [blame]
Christoph Probsta205d502019-05-08 21:36:25 +02001// SPDX-License-Identifier: GPL-2.0
Steve French1080ef72011-02-24 18:07:19 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Steve French1080ef72011-02-24 18:07:19 +00006 */
7
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07008#include <linux/pagemap.h>
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07009#include <linux/vfs.h>
Steve Frenchf29ebb42014-07-19 21:44:58 -050010#include <linux/falloc.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070011#include <linux/scatterlist.h>
Tobias Regnery4fa8e502017-03-30 12:34:14 +020012#include <linux/uuid.h>
Aurelien Aptel35adffe2019-09-20 06:29:39 +020013#include <linux/sort.h>
Pavel Shilovsky026e93d2016-11-03 16:47:37 -070014#include <crypto/aead.h>
Christoph Hellwig10c5db22020-05-23 09:30:11 +020015#include <linux/fiemap.h>
Ronnie Sahlberg8bd0d702020-01-17 11:45:02 +100016#include "cifsfs.h"
Steve French1080ef72011-02-24 18:07:19 +000017#include "cifsglob.h"
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +040018#include "smb2pdu.h"
19#include "smb2proto.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040020#include "cifsproto.h"
21#include "cifs_debug.h"
Pavel Shilovskyb42bf882013-08-14 19:25:21 +040022#include "cifs_unicode.h"
Pavel Shilovsky2e44b282012-09-18 16:20:33 -070023#include "smb2status.h"
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -070024#include "smb2glob.h"
Steve French834170c2016-09-30 21:14:26 -050025#include "cifs_ioctl.h"
Long Li09902f82017-11-22 17:38:39 -070026#include "smbdirect.h"
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -060027#include "fs_context.h"
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040028
Pavel Shilovskyef68e832019-01-18 17:25:36 -080029/* Change credits for different ops and return the total number of credits */
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040030static int
31change_conf(struct TCP_Server_Info *server)
32{
33 server->credits += server->echo_credits + server->oplock_credits;
34 server->oplock_credits = server->echo_credits = 0;
35 switch (server->credits) {
36 case 0:
Pavel Shilovskyef68e832019-01-18 17:25:36 -080037 return 0;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040038 case 1:
39 server->echoes = false;
40 server->oplocks = false;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040041 break;
42 case 2:
43 server->echoes = true;
44 server->oplocks = false;
45 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040046 break;
47 default:
48 server->echoes = true;
Steve Frenche0ddde92015-09-22 09:29:38 -050049 if (enable_oplocks) {
50 server->oplocks = true;
51 server->oplock_credits = 1;
52 } else
53 server->oplocks = false;
54
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040055 server->echo_credits = 1;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040056 }
57 server->credits -= server->echo_credits + server->oplock_credits;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080058 return server->credits + server->echo_credits + server->oplock_credits;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040059}
60
61static void
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080062smb2_add_credits(struct TCP_Server_Info *server,
63 const struct cifs_credits *credits, const int optype)
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040064{
Pavel Shilovskyef68e832019-01-18 17:25:36 -080065 int *val, rc = -1;
Shyam Prasad N6d82c272021-02-03 23:20:46 -080066 int scredits, in_flight;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080067 unsigned int add = credits->value;
68 unsigned int instance = credits->instance;
69 bool reconnect_detected = false;
Shyam Prasad N6d82c272021-02-03 23:20:46 -080070 bool reconnect_with_invalid_credits = false;
Pavel Shilovskyef68e832019-01-18 17:25:36 -080071
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040072 spin_lock(&server->req_lock);
73 val = server->ops->get_credits_field(server, optype);
Steve Frenchb340a4d2018-09-01 01:10:17 -050074
75 /* eg found case where write overlapping reconnect messed up credits */
76 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
Shyam Prasad N6d82c272021-02-03 23:20:46 -080077 reconnect_with_invalid_credits = true;
78
Pavel Shilovsky335b7b62019-01-16 11:12:41 -080079 if ((instance == 0) || (instance == server->reconnect_instance))
80 *val += add;
81 else
82 reconnect_detected = true;
Steve Frenchb340a4d2018-09-01 01:10:17 -050083
Steve French141891f2016-09-23 00:44:16 -050084 if (*val > 65000) {
85 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
Joe Perchesa0a30362020-04-14 22:42:53 -070086 pr_warn_once("server overflowed SMB3 credits\n");
Steve French141891f2016-09-23 00:44:16 -050087 }
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040088 server->in_flight--;
Shyam Prasad N0f56db82021-02-03 22:49:52 -080089 if (server->in_flight == 0 &&
90 ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
91 ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
Pavel Shilovsky28ea5292012-05-23 16:18:00 +040092 rc = change_conf(server);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -070093 /*
94 * Sometimes server returns 0 credits on oplock break ack - we need to
95 * rebalance credits in this case.
96 */
97 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
98 server->oplocks) {
99 if (server->credits > 1) {
100 server->credits--;
101 server->oplock_credits++;
102 }
103 }
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800104 scredits = *val;
105 in_flight = server->in_flight;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400106 spin_unlock(&server->req_lock);
107 wake_up(&server->request_q);
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800108
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800109 if (reconnect_detected) {
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800110 trace_smb3_reconnect_detected(server->CurrentMid,
111 server->conn_id, server->hostname, scredits, add, in_flight);
112
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800113 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
114 add, instance);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800115 }
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800116
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800117 if (reconnect_with_invalid_credits) {
118 trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
119 server->conn_id, server->hostname, scredits, add, in_flight);
120 cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
121 optype, scredits, add);
122 }
123
Pavel Shilovsky82e04572019-01-25 10:56:41 -0800124 if (server->tcpStatus == CifsNeedReconnect
125 || server->tcpStatus == CifsExiting)
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800126 return;
127
128 switch (rc) {
129 case -1:
130 /* change_conf hasn't been executed */
131 break;
132 case 0:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000133 cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800134 break;
135 case 1:
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000136 cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800137 break;
138 case 2:
139 cifs_dbg(FYI, "disabling oplocks\n");
140 break;
141 default:
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800142 /* change_conf rebalanced credits for different types */
143 break;
Pavel Shilovskyef68e832019-01-18 17:25:36 -0800144 }
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800145
146 trace_smb3_add_credits(server->CurrentMid,
147 server->conn_id, server->hostname, scredits, add, in_flight);
148 cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400149}
150
151static void
152smb2_set_credits(struct TCP_Server_Info *server, const int val)
153{
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800154 int scredits, in_flight;
155
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400156 spin_lock(&server->req_lock);
157 server->credits = val;
Steve French9e1a37d2018-09-19 02:38:17 -0500158 if (val == 1)
159 server->reconnect_instance++;
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800160 scredits = server->credits;
161 in_flight = server->in_flight;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400162 spin_unlock(&server->req_lock);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800163
164 trace_smb3_set_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800165 server->conn_id, server->hostname, scredits, val, in_flight);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800166 cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
167
Steve French6e4d3bb2018-09-22 11:25:04 -0500168 /* don't log while holding the lock */
169 if (val == 1)
170 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400171}
172
173static int *
174smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
175{
176 switch (optype) {
177 case CIFS_ECHO_OP:
178 return &server->echo_credits;
179 case CIFS_OBREAK_OP:
180 return &server->oplock_credits;
181 default:
182 return &server->credits;
183 }
184}
185
186static unsigned int
187smb2_get_credits(struct mid_q_entry *mid)
188{
Pavel Shilovsky86a79642019-11-21 11:35:13 -0800189 return mid->credits_received;
Pavel Shilovsky28ea5292012-05-23 16:18:00 +0400190}
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400191
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400192static int
193smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800194 unsigned int *num, struct cifs_credits *credits)
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400195{
196 int rc = 0;
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800197 unsigned int scredits, in_flight;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400198
199 spin_lock(&server->req_lock);
200 while (1) {
201 if (server->credits <= 0) {
202 spin_unlock(&server->req_lock);
203 cifs_num_waiters_inc(server);
204 rc = wait_event_killable(server->request_q,
Ronnie Sahlbergb227d212019-03-08 12:58:20 +1000205 has_credits(server, &server->credits, 1));
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400206 cifs_num_waiters_dec(server);
207 if (rc)
208 return rc;
209 spin_lock(&server->req_lock);
210 } else {
211 if (server->tcpStatus == CifsExiting) {
212 spin_unlock(&server->req_lock);
213 return -ENOENT;
214 }
215
216 scredits = server->credits;
217 /* can deadlock with reopen */
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800218 if (scredits <= 8) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400219 *num = SMB2_MAX_BUFFER_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800220 credits->value = 0;
221 credits->instance = 0;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400222 break;
223 }
224
Pavel Shilovskyacc58d02019-01-17 08:21:24 -0800225 /* leave some credits for reopen and other ops */
226 scredits -= 8;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400227 *num = min_t(unsigned int, size,
228 scredits * SMB2_MAX_BUFFER_SIZE);
229
Pavel Shilovsky335b7b62019-01-16 11:12:41 -0800230 credits->value =
231 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
232 credits->instance = server->reconnect_instance;
233 server->credits -= credits->value;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400234 server->in_flight++;
Steve French1b63f182019-09-09 22:57:11 -0500235 if (server->in_flight > server->max_in_flight)
236 server->max_in_flight = server->in_flight;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400237 break;
238 }
239 }
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800240 scredits = server->credits;
241 in_flight = server->in_flight;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400242 spin_unlock(&server->req_lock);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800243
244 trace_smb3_add_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800245 server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800246 cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
247 __func__, credits->value, scredits);
248
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400249 return rc;
250}
251
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800252static int
253smb2_adjust_credits(struct TCP_Server_Info *server,
254 struct cifs_credits *credits,
255 const unsigned int payload_size)
256{
257 int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800258 int scredits, in_flight;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800259
260 if (!credits->value || credits->value == new_val)
261 return 0;
262
263 if (credits->value < new_val) {
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800264 trace_smb3_too_many_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800265 server->conn_id, server->hostname, 0, credits->value - new_val, 0);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800266 cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
267 credits->value, new_val);
268
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800269 return -ENOTSUPP;
270 }
271
272 spin_lock(&server->req_lock);
273
274 if (server->reconnect_instance != credits->instance) {
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800275 scredits = server->credits;
276 in_flight = server->in_flight;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800277 spin_unlock(&server->req_lock);
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800278
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800279 trace_smb3_reconnect_detected(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800280 server->conn_id, server->hostname, scredits,
281 credits->value - new_val, in_flight);
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000282 cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800283 credits->value - new_val);
284 return -EAGAIN;
285 }
286
287 server->credits += credits->value - new_val;
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800288 scredits = server->credits;
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800289 in_flight = server->in_flight;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800290 spin_unlock(&server->req_lock);
291 wake_up(&server->request_q);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800292
293 trace_smb3_add_credits(server->CurrentMid,
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800294 server->conn_id, server->hostname, scredits,
295 credits->value - new_val, in_flight);
Shyam Prasad Ncd7b6992020-11-12 08:56:49 -0800296 cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
297 __func__, credits->value - new_val, scredits);
298
Shyam Prasad N6d82c272021-02-03 23:20:46 -0800299 credits->value = new_val;
300
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -0800301 return 0;
302}
303
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +0400304static __u64
305smb2_get_next_mid(struct TCP_Server_Info *server)
306{
307 __u64 mid;
308 /* for SMB2 we need the current value */
309 spin_lock(&GlobalMid_Lock);
310 mid = server->CurrentMid++;
311 spin_unlock(&GlobalMid_Lock);
312 return mid;
313}
Steve French1080ef72011-02-24 18:07:19 +0000314
Pavel Shilovskyc781af72019-03-04 14:02:50 -0800315static void
316smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
317{
318 spin_lock(&GlobalMid_Lock);
319 if (server->CurrentMid >= val)
320 server->CurrentMid -= val;
321 spin_unlock(&GlobalMid_Lock);
322}
323
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400324static struct mid_q_entry *
Rohith Surabattulaac873aa2020-10-29 05:03:10 +0000325__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400326{
327 struct mid_q_entry *mid;
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000328 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700329 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400330
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700331 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000332 cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
Steve French373512e2015-12-18 13:05:30 -0600333 return NULL;
334 }
335
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400336 spin_lock(&GlobalMid_Lock);
337 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
Sachin Prabhu9235d092014-12-09 17:37:00 +0000338 if ((mid->mid == wire_mid) &&
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400339 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700340 (mid->command == shdr->Command)) {
Lars Persson696e4202018-06-25 14:05:25 +0200341 kref_get(&mid->refcount);
Rohith Surabattulaac873aa2020-10-29 05:03:10 +0000342 if (dequeue) {
343 list_del_init(&mid->qhead);
344 mid->mid_flags |= MID_DELETED;
345 }
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400346 spin_unlock(&GlobalMid_Lock);
347 return mid;
348 }
349 }
350 spin_unlock(&GlobalMid_Lock);
351 return NULL;
352}
353
Rohith Surabattulaac873aa2020-10-29 05:03:10 +0000354static struct mid_q_entry *
355smb2_find_mid(struct TCP_Server_Info *server, char *buf)
356{
357 return __smb2_find_mid(server, buf, false);
358}
359
/* Find the pending mid for @buf and atomically remove it from the queue. */
static struct mid_q_entry *
smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
{
	return __smb2_find_mid(server, buf, true);
}
365
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400366static void
Ronnie Sahlberg14547f72018-04-22 14:45:53 -0600367smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400368{
369#ifdef CONFIG_CIFS_DEBUG2
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +1000370 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400371
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000372 cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
Pavel Shilovsky31473fc2016-10-24 15:33:04 -0700373 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
374 shdr->ProcessId);
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000375 cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
Steve French71992e622018-05-06 15:58:51 -0500376 server->ops->calc_smb_size(buf, server));
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +0400377#endif
378}
379
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400380static bool
381smb2_need_neg(struct TCP_Server_Info *server)
382{
383 return server->max_read == 0;
384}
385
/*
 * Run the SMB2 protocol negotiation for @ses, resetting the mid counter
 * first.  -EAGAIN from the transport is mapped to -EHOSTDOWN rather than
 * retried.
 */
static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;

	cifs_ses_server(ses)->CurrentMid = 0;
	rc = SMB2_negotiate(xid, ses);
	/* BB we probably don't need to retry with modern servers */
	if (rc == -EAGAIN)
		rc = -EHOSTDOWN;
	return rc;
}
398
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700399static unsigned int
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600400smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700401{
402 struct TCP_Server_Info *server = tcon->ses->server;
403 unsigned int wsize;
404
405 /* start with specified wsize, or default */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600406 wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700407 wsize = min_t(unsigned int, wsize, server->max_write);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +0400408 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
409 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700410
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700411 return wsize;
412}
413
/*
 * Pick the write size for an SMB3 mount: the user-requested (or default)
 * wsize, clamped to the server maximum, the RDMA transport limits when
 * smbdirect is in use, and one credit's worth without large MTU support.
 */
static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			wsize = min_t(unsigned int,
				wsize,
				server->smbd_conn->max_fragmented_send_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			wsize = min_t(unsigned int,
				wsize, server->smbd_conn->max_readwrite_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return wsize;
}
445
446static unsigned int
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600447smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700448{
449 struct TCP_Server_Info *server = tcon->ses->server;
450 unsigned int rsize;
451
452 /* start with specified rsize, or default */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600453 rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700454 rsize = min_t(unsigned int, rsize, server->max_read);
Pavel Shilovskybed9da02014-06-25 11:28:57 +0400455
456 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
457 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700458
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -0700459 return rsize;
460}
461
Steve French3d621232018-09-25 15:33:47 -0500462static unsigned int
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600463smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
Steve French3d621232018-09-25 15:33:47 -0500464{
465 struct TCP_Server_Info *server = tcon->ses->server;
466 unsigned int rsize;
467
468 /* start with specified rsize, or default */
Ronnie Sahlberg3fa1c6d2020-12-09 23:07:12 -0600469 rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
Steve French3d621232018-09-25 15:33:47 -0500470 rsize = min_t(unsigned int, rsize, server->max_read);
471#ifdef CONFIG_CIFS_SMB_DIRECT
472 if (server->rdma) {
473 if (server->sign)
Long Lif7950cb2020-03-26 19:42:24 -0700474 /*
475 * Account for SMB2 data transfer packet header and
476 * possible encryption header
477 */
Steve French3d621232018-09-25 15:33:47 -0500478 rsize = min_t(unsigned int,
Long Lif7950cb2020-03-26 19:42:24 -0700479 rsize,
480 server->smbd_conn->max_fragmented_recv_size -
481 SMB2_READWRITE_PDU_HEADER_SIZE -
482 sizeof(struct smb2_transform_hdr));
Steve French3d621232018-09-25 15:33:47 -0500483 else
484 rsize = min_t(unsigned int,
485 rsize, server->smbd_conn->max_readwrite_size);
486 }
487#endif
488
489 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
490 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
491
492 return rsize;
493}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200494
495static int
496parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
497 size_t buf_len,
498 struct cifs_server_iface **iface_list,
499 size_t *iface_count)
500{
501 struct network_interface_info_ioctl_rsp *p;
502 struct sockaddr_in *addr4;
503 struct sockaddr_in6 *addr6;
504 struct iface_info_ipv4 *p4;
505 struct iface_info_ipv6 *p6;
506 struct cifs_server_iface *info;
507 ssize_t bytes_left;
508 size_t next = 0;
509 int nb_iface = 0;
510 int rc = 0;
511
512 *iface_list = NULL;
513 *iface_count = 0;
514
515 /*
516 * Fist pass: count and sanity check
517 */
518
519 bytes_left = buf_len;
520 p = buf;
521 while (bytes_left >= sizeof(*p)) {
522 nb_iface++;
523 next = le32_to_cpu(p->Next);
524 if (!next) {
525 bytes_left -= sizeof(*p);
526 break;
527 }
528 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
529 bytes_left -= next;
530 }
531
532 if (!nb_iface) {
533 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
534 rc = -EINVAL;
535 goto out;
536 }
537
Steve Frenchebcd6de2020-12-08 21:13:31 -0600538 /* Azure rounds the buffer size up 8, to a 16 byte boundary */
539 if ((bytes_left > 8) || p->Next)
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200540 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
541
542
543 /*
544 * Second pass: extract info to internal structure
545 */
546
547 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
548 if (!*iface_list) {
549 rc = -ENOMEM;
550 goto out;
551 }
552
553 info = *iface_list;
554 bytes_left = buf_len;
555 p = buf;
556 while (bytes_left >= sizeof(*p)) {
557 info->speed = le64_to_cpu(p->LinkSpeed);
558 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
559 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
560
561 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
562 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
563 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
564 le32_to_cpu(p->Capability));
565
566 switch (p->Family) {
567 /*
568 * The kernel and wire socket structures have the same
569 * layout and use network byte order but make the
570 * conversion explicit in case either one changes.
571 */
572 case INTERNETWORK:
573 addr4 = (struct sockaddr_in *)&info->sockaddr;
574 p4 = (struct iface_info_ipv4 *)p->Buffer;
575 addr4->sin_family = AF_INET;
576 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
577
578 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
579 addr4->sin_port = cpu_to_be16(CIFS_PORT);
580
581 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
582 &addr4->sin_addr);
583 break;
584 case INTERNETWORKV6:
585 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
586 p6 = (struct iface_info_ipv6 *)p->Buffer;
587 addr6->sin6_family = AF_INET6;
588 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
589
590 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
591 addr6->sin6_flowinfo = 0;
592 addr6->sin6_scope_id = 0;
593 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
594
595 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
596 &addr6->sin6_addr);
597 break;
598 default:
599 cifs_dbg(VFS,
600 "%s: skipping unsupported socket family\n",
601 __func__);
602 goto next_iface;
603 }
604
605 (*iface_count)++;
606 info++;
607next_iface:
608 next = le32_to_cpu(p->Next);
609 if (!next)
610 break;
611 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
612 bytes_left -= next;
613 }
614
615 if (!*iface_count) {
616 rc = -EINVAL;
617 goto out;
618 }
619
620out:
621 if (rc) {
622 kfree(*iface_list);
623 *iface_count = 0;
624 *iface_list = NULL;
625 }
626 return rc;
627}
628
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200629static int compare_iface(const void *ia, const void *ib)
630{
631 const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
632 const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
633
634 return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
635}
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200636
Steve Frenchc481e9f2013-10-14 01:21:53 -0500637static int
638SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
639{
640 int rc;
641 unsigned int ret_data_len = 0;
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200642 struct network_interface_info_ioctl_rsp *out_buf = NULL;
643 struct cifs_server_iface *iface_list;
644 size_t iface_count;
645 struct cifs_ses *ses = tcon->ses;
Steve Frenchc481e9f2013-10-14 01:21:53 -0500646
647 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
648 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
649 NULL /* no data input */, 0 /* no data input */,
Steve French153322f2019-03-28 22:32:49 -0500650 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
Steve Frenchc3ed4402018-06-28 22:53:39 -0500651 if (rc == -EOPNOTSUPP) {
652 cifs_dbg(FYI,
653 "server does not support query network interfaces\n");
654 goto out;
655 } else if (rc != 0) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +1000656 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200657 goto out;
Steve French9ffc5412014-10-16 15:13:14 -0500658 }
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200659
660 rc = parse_server_interfaces(out_buf, ret_data_len,
661 &iface_list, &iface_count);
662 if (rc)
663 goto out;
664
Aurelien Aptel35adffe2019-09-20 06:29:39 +0200665 /* sort interfaces from fastest to slowest */
666 sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);
667
Aurelien Aptelfe856be2018-06-14 17:04:51 +0200668 spin_lock(&ses->iface_lock);
669 kfree(ses->iface_list);
670 ses->iface_list = iface_list;
671 ses->iface_count = iface_count;
672 ses->iface_last_update = jiffies;
673 spin_unlock(&ses->iface_lock);
674
675out:
Steve French24df1482016-09-29 04:20:23 -0500676 kfree(out_buf);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500677 return rc;
678}
Steve Frenchc481e9f2013-10-14 01:21:53 -0500679
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000680static void
681smb2_close_cached_fid(struct kref *ref)
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000682{
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000683 struct cached_fid *cfid = container_of(ref, struct cached_fid,
684 refcount);
685
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000686 if (cfid->is_valid) {
687 cifs_dbg(FYI, "clear cached root file handle\n");
688 SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
689 cfid->fid->volatile_fid);
690 cfid->is_valid = false;
Ronnie Sahlbergb0f6df72019-03-12 13:58:31 +1000691 cfid->file_all_info_is_valid = false;
Pavel Shilovskyd9191312019-12-10 11:44:52 -0800692 cfid->has_lease = false;
Ronnie Sahlberg5e9c89d2021-03-09 09:07:31 +1000693 if (cfid->dentry) {
694 dput(cfid->dentry);
695 cfid->dentry = NULL;
696 }
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000697 }
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000698}
699
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000700void close_cached_dir(struct cached_fid *cfid)
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000701{
702 mutex_lock(&cfid->fid_mutex);
703 kref_put(&cfid->refcount, smb2_close_cached_fid);
Ronnie Sahlberga93864d2018-06-14 06:48:35 +1000704 mutex_unlock(&cfid->fid_mutex);
705}
706
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000707void close_cached_dir_lease_locked(struct cached_fid *cfid)
Pavel Shilovskyd9191312019-12-10 11:44:52 -0800708{
709 if (cfid->has_lease) {
710 cfid->has_lease = false;
711 kref_put(&cfid->refcount, smb2_close_cached_fid);
712 }
713}
714
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000715void close_cached_dir_lease(struct cached_fid *cfid)
Pavel Shilovskyd9191312019-12-10 11:44:52 -0800716{
717 mutex_lock(&cfid->fid_mutex);
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000718 close_cached_dir_lease_locked(cfid);
Pavel Shilovskyd9191312019-12-10 11:44:52 -0800719 mutex_unlock(&cfid->fid_mutex);
720}
721
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000722void
723smb2_cached_lease_break(struct work_struct *work)
724{
725 struct cached_fid *cfid = container_of(work,
726 struct cached_fid, lease_break);
727
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000728 close_cached_dir_lease(cfid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000729}
730
Steve French3d4ef9a2018-04-25 22:19:09 -0500731/*
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000732 * Open the and cache a directory handle.
733 * Only supported for the root handle.
Steve French3d4ef9a2018-04-25 22:19:09 -0500734 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    struct cached_fid **cfid)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 utf16_path = 0; /* Null - since an open of top of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry;

	if (tcon->nohandlecache)
		return -ENOTSUPP;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	/* only the root of the share can be cached (see comment above) */
	if (strlen(path))
		return -ENOENT;

	dentry = cifs_sb->root;

	/* fast path: reuse the already-cached handle, bumping its refcount */
	mutex_lock(&tcon->crfid.fid_mutex);
	if (tcon->crfid.is_valid) {
		cifs_dbg(FYI, "found a cached root file handle\n");
		*cfid = &tcon->crfid;
		kref_get(&tcon->crfid.refcount);
		mutex_unlock(&tcon->crfid.fid_mutex);
		return 0;
	}

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect, it will end up calling
	 * cifs_mark_open_files_invalid() which takes the lock again
	 * thus causing a deadlock
	 */

	mutex_unlock(&tcon->crfid.fid_mutex);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (!server->ops->new_lease_key)
		return -EIO;

	pfid = tcon->crfid.fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open: first leg of the compound open+query_info request */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.fid = pfid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, &utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* second leg: query FILE_ALL_INFORMATION on the just-opened handle */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	mutex_lock(&tcon->crfid.fid_mutex);

	/*
	 * Now we need to check again as the cached root might have
	 * been successfully re-opened from a concurrent process
	 */

	if (tcon->crfid.is_valid) {
		/* work was already done */

		/* stash fids for close() later */
		struct cifs_fid fid = {
			.persistent_fid = pfid->persistent_fid,
			.volatile_fid = pfid->volatile_fid,
		};

		/*
		 * caller expects this func to set the fid in crfid to valid
		 * cached root, so increment the refcount.
		 */
		kref_get(&tcon->crfid.refcount);

		mutex_unlock(&tcon->crfid.fid_mutex);

		if (rc == 0) {
			/* close extra handle outside of crit sec */
			SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
		}
		/* the concurrent open won; report success to the caller */
		rc = 0;
		goto oshr_free;
	}

	/* Cached root is still invalid, continue normally */

	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->treeName);
		}
		goto oshr_exit;
	}

	atomic_inc(&tcon->num_remote_opens);

	/* record the fids returned by the server for the cached handle */
	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	/* publish the cached handle; pin the root dentry while it is cached */
	tcon->crfid.tcon = tcon;
	tcon->crfid.is_valid = true;
	tcon->crfid.dentry = dentry;
	dget(dentry);
	kref_init(&tcon->crfid.refcount);

	/* BB TBD check to see if oplock level check can be removed below */
	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
		/*
		 * See commit 2f94a3125b87. Increment the refcount when we
		 * get a lease for root, release it if lease break occurs
		 */
		kref_get(&tcon->crfid.refcount);
		tcon->crfid.has_lease = true;
		smb2_parse_contexts(server, o_rsp,
				    &oparms.fid->epoch,
				    oparms.fid->lease_key, &oplock,
				    NULL, NULL);
	} else
		goto oshr_exit;

	/* cache the root's attributes from the query_info leg, if valid */
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_exit;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&tcon->crfid.file_all_info))
		tcon->crfid.file_all_info_is_valid = true;
	tcon->crfid.time = jiffies;


oshr_exit:
	mutex_unlock(&tcon->crfid.fid_mutex);
oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	/* on success hand the caller a referenced pointer to the cached fid */
	if (rc == 0)
		*cfid = &tcon->crfid;
	return rc;
}
930
Ronnie Sahlberg6ef4e9c2021-03-09 09:07:32 +1000931int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
932 struct dentry *dentry,
933 struct cached_fid **cfid)
934{
935 mutex_lock(&tcon->crfid.fid_mutex);
936 if (tcon->crfid.dentry == dentry) {
937 cifs_dbg(FYI, "found a cached root file handle by dentry\n");
938 *cfid = &tcon->crfid;
939 kref_get(&tcon->crfid.refcount);
940 mutex_unlock(&tcon->crfid.fid_mutex);
941 return 0;
942 }
943 mutex_unlock(&tcon->crfid.fid_mutex);
944 return -ENOENT;
945}
946
Steve French34f62642013-10-09 02:07:00 -0500947static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200948smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
949 struct cifs_sb_info *cifs_sb)
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500950{
951 int rc;
952 __le16 srch_path = 0; /* Null - open root of share */
953 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
954 struct cifs_open_parms oparms;
955 struct cifs_fid fid;
Ronnie Sahlberg9e81e8f2020-10-05 12:37:52 +1000956 struct cached_fid *cfid = NULL;
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500957
958 oparms.tcon = tcon;
959 oparms.desired_access = FILE_READ_ATTRIBUTES;
960 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +0200961 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500962 oparms.fid = &fid;
963 oparms.reconnect = false;
964
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000965 rc = open_cached_dir(xid, tcon, "", cifs_sb, &cfid);
Ronnie Sahlberg4df3d972021-03-09 09:07:27 +1000966 if (rc == 0)
967 memcpy(&fid, cfid->fid, sizeof(struct cifs_fid));
968 else
Ronnie Sahlberg9d874c32018-06-08 13:21:18 +1000969 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
Aurelien Aptel69dda302020-03-02 17:53:22 +0100970 NULL, NULL);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500971 if (rc)
972 return;
973
Steve Frenchc481e9f2013-10-14 01:21:53 -0500974 SMB3_request_interfaces(xid, tcon);
Steve Frenchc481e9f2013-10-14 01:21:53 -0500975
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500976 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
977 FS_ATTRIBUTE_INFORMATION);
978 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
979 FS_DEVICE_INFORMATION);
980 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steve French21ba3842018-06-24 23:18:52 -0500981 FS_VOLUME_INFORMATION);
982 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500983 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
Ronnie Sahlberg4df3d972021-03-09 09:07:27 +1000984 if (cfid == NULL)
Steve French3d4ef9a2018-04-25 22:19:09 -0500985 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Ronnie Sahlberg9da6ec72018-07-31 08:48:22 +1000986 else
Ronnie Sahlberg45c0f1a2021-03-09 09:07:29 +1000987 close_cached_dir(cfid);
Steven Frenchaf6a12e2013-10-09 20:55:53 -0500988}
989
990static void
Amir Goldstein0f060932020-02-03 21:46:43 +0200991smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
992 struct cifs_sb_info *cifs_sb)
Steve French34f62642013-10-09 02:07:00 -0500993{
994 int rc;
995 __le16 srch_path = 0; /* Null - open root of share */
996 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
997 struct cifs_open_parms oparms;
998 struct cifs_fid fid;
999
1000 oparms.tcon = tcon;
1001 oparms.desired_access = FILE_READ_ATTRIBUTES;
1002 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001003 oparms.create_options = cifs_create_options(cifs_sb, 0);
Steve French34f62642013-10-09 02:07:00 -05001004 oparms.fid = &fid;
1005 oparms.reconnect = false;
1006
Aurelien Aptel69dda302020-03-02 17:53:22 +01001007 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
1008 NULL, NULL);
Steve French34f62642013-10-09 02:07:00 -05001009 if (rc)
1010 return;
1011
Steven French21671142013-10-09 13:36:35 -05001012 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1013 FS_ATTRIBUTE_INFORMATION);
1014 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1015 FS_DEVICE_INFORMATION);
Steve French34f62642013-10-09 02:07:00 -05001016 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Steve French34f62642013-10-09 02:07:00 -05001017}
1018
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001019static int
1020smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
1021 struct cifs_sb_info *cifs_sb, const char *full_path)
1022{
1023 int rc;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001024 __le16 *utf16_path;
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001025 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001026 struct cifs_open_parms oparms;
1027 struct cifs_fid fid;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001028
Ronnie Sahlberga93864d2018-06-14 06:48:35 +10001029 if ((*full_path == 0) && tcon->crfid.is_valid)
Steve French3d4ef9a2018-04-25 22:19:09 -05001030 return 0;
1031
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001032 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
1033 if (!utf16_path)
1034 return -ENOMEM;
1035
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001036 oparms.tcon = tcon;
1037 oparms.desired_access = FILE_READ_ATTRIBUTES;
1038 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001039 oparms.create_options = cifs_create_options(cifs_sb, 0);
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001040 oparms.fid = &fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001041 oparms.reconnect = false;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001042
Aurelien Aptel69dda302020-03-02 17:53:22 +01001043 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
1044 NULL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001045 if (rc) {
1046 kfree(utf16_path);
1047 return rc;
1048 }
1049
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001050 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001051 kfree(utf16_path);
1052 return rc;
1053}
1054
/*
 * Return the server-assigned unique id (inode number) for a file.
 * The value was already fetched into @data by an earlier query, so no
 * network round trip is needed; xid/tcon/cifs_sb/full_path are unused
 * here but required by the ops-table signature.
 */
static int
smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	/* IndexNumber is little-endian on the wire */
	*uniqueid = le64_to_cpu(data->IndexNumber);
	return 0;
}
1063
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07001064static int
1065smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
1066 struct cifs_fid *fid, FILE_ALL_INFO *data)
1067{
1068 int rc;
1069 struct smb2_file_all_info *smb2_data;
1070
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04001071 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07001072 GFP_KERNEL);
1073 if (smb2_data == NULL)
1074 return -ENOMEM;
1075
1076 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
1077 smb2_data);
1078 if (!rc)
1079 move_smb2_info_to_cifs(data, smb2_data);
1080 kfree(smb2_data);
1081 return rc;
1082}
1083
Arnd Bergmann1368f152017-09-05 11:24:15 +02001084#ifdef CONFIG_CIFS_XATTR
/*
 * Convert an SMB2 FILE_FULL_EA_INFORMATION list in @src (@src_size bytes)
 * into the form the xattr interface expects.
 *
 * With @ea_name set (getxattr): find that one EA and copy its value into
 * @dst; returns the value length, -ERANGE if @dst is too small, or
 * -ENODATA if not found. @dst_size == 0 means "report the size only".
 *
 * With @ea_name NULL (listxattr): write each name into @dst as a
 * NUL-terminated "user.<name>" string; returns the total bytes
 * used (or needed, when @dst_size == 0), or -ERANGE on overflow.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	/* remember the original size: 0 selects "size query" mode below */
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		/* each entry stores the name, its NUL, then the value */
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		/* an empty name terminates the list */
		if (name_len == 0)
			break;

		/* 8 = fixed header preceding ea_data (per-entry overhead);
		 * reject entries that claim to extend past the buffer
		 */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			/* lookup mode: match and copy out this one value */
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* a zero next_entry_offset marks the last entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1167
1168static ssize_t
1169smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
1170 const unsigned char *path, const unsigned char *ea_name,
1171 char *ea_data, size_t buf_size,
1172 struct cifs_sb_info *cifs_sb)
1173{
1174 int rc;
1175 __le16 *utf16_path;
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001176 struct kvec rsp_iov = {NULL, 0};
1177 int buftype = CIFS_NO_BUFFER;
1178 struct smb2_query_info_rsp *rsp;
1179 struct smb2_file_full_ea_info *info = NULL;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001180
1181 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1182 if (!utf16_path)
1183 return -ENOMEM;
1184
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001185 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1186 FILE_READ_EA,
1187 FILE_FULL_EA_INFORMATION,
1188 SMB2_O_INFO_FILE,
Ronnie Sahlbergc4627e62019-01-29 12:46:17 +10001189 CIFSMaxBufSize -
1190 MAX_SMB2_CREATE_RESPONSE_SIZE -
1191 MAX_SMB2_CLOSE_RESPONSE_SIZE,
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001192 &rsp_iov, &buftype, cifs_sb);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001193 if (rc) {
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001194 /*
1195 * If ea_name is NULL (listxattr) and there are no EAs,
1196 * return 0 as it's not an error. Otherwise, the specified
1197 * ea_name was not found.
1198 */
1199 if (!ea_name && rc == -ENODATA)
1200 rc = 0;
1201 goto qeas_exit;
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001202 }
1203
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001204 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
1205 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1206 le32_to_cpu(rsp->OutputBufferLength),
1207 &rsp_iov,
1208 sizeof(struct smb2_file_full_ea_info));
1209 if (rc)
1210 goto qeas_exit;
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001211
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001212 info = (struct smb2_file_full_ea_info *)(
1213 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1214 rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
1215 le32_to_cpu(rsp->OutputBufferLength), ea_name);
Ronnie Sahlberg7cb3def2017-09-28 09:39:58 +10001216
Ronnie Sahlbergf9793b62018-11-27 09:52:04 +10001217 qeas_exit:
1218 kfree(utf16_path);
1219 free_rsp_buf(buftype, rsp_iov.iov_base);
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10001220 return rc;
1221}
1222
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001223
1224static int
1225smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1226 const char *path, const char *ea_name, const void *ea_value,
1227 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1228 struct cifs_sb_info *cifs_sb)
1229{
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001230 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001231 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001232 __le16 *utf16_path = NULL;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001233 int ea_name_len = strlen(ea_name);
Paulo Alcantara04ad69c2021-03-08 12:00:50 -03001234 int flags = CIFS_CP_CREATE_CLOSE_OP;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001235 int len;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001236 struct smb_rqst rqst[3];
1237 int resp_buftype[3];
1238 struct kvec rsp_iov[3];
1239 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1240 struct cifs_open_parms oparms;
1241 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1242 struct cifs_fid fid;
1243 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1244 unsigned int size[1];
1245 void *data[1];
1246 struct smb2_file_full_ea_info *ea = NULL;
1247 struct kvec close_iov[1];
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001248 struct smb2_query_info_rsp *rsp;
1249 int rc, used_len = 0;
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001250
1251 if (smb3_encryption_required(tcon))
1252 flags |= CIFS_TRANSFORM_REQ;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001253
1254 if (ea_name_len > 255)
1255 return -EINVAL;
1256
1257 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1258 if (!utf16_path)
1259 return -ENOMEM;
1260
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001261 memset(rqst, 0, sizeof(rqst));
1262 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1263 memset(rsp_iov, 0, sizeof(rsp_iov));
1264
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001265 if (ses->server->ops->query_all_EAs) {
1266 if (!ea_value) {
1267 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1268 ea_name, NULL, 0,
1269 cifs_sb);
1270 if (rc == -ENODATA)
1271 goto sea_exit;
Ronnie Sahlberg85db6b72020-02-13 12:14:47 +10001272 } else {
1273 /* If we are adding a attribute we should first check
1274 * if there will be enough space available to store
1275 * the new EA. If not we should not add it since we
1276 * would not be able to even read the EAs back.
1277 */
1278 rc = smb2_query_info_compound(xid, tcon, utf16_path,
1279 FILE_READ_EA,
1280 FILE_FULL_EA_INFORMATION,
1281 SMB2_O_INFO_FILE,
1282 CIFSMaxBufSize -
1283 MAX_SMB2_CREATE_RESPONSE_SIZE -
1284 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1285 &rsp_iov[1], &resp_buftype[1], cifs_sb);
1286 if (rc == 0) {
1287 rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1288 used_len = le32_to_cpu(rsp->OutputBufferLength);
1289 }
1290 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1291 resp_buftype[1] = CIFS_NO_BUFFER;
1292 memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1293 rc = 0;
1294
1295 /* Use a fudge factor of 256 bytes in case we collide
1296 * with a different set_EAs command.
1297 */
1298 if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1299 MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1300 used_len + ea_name_len + ea_value_len + 1) {
1301 rc = -ENOSPC;
1302 goto sea_exit;
1303 }
Ronnie Sahlberg21094642019-02-07 15:48:44 +10001304 }
1305 }
1306
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001307 /* Open */
1308 memset(&open_iov, 0, sizeof(open_iov));
1309 rqst[0].rq_iov = open_iov;
1310 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1311
1312 memset(&oparms, 0, sizeof(oparms));
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001313 oparms.tcon = tcon;
1314 oparms.desired_access = FILE_WRITE_EA;
1315 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001316 oparms.create_options = cifs_create_options(cifs_sb, 0);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001317 oparms.fid = &fid;
1318 oparms.reconnect = false;
1319
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001320 rc = SMB2_open_init(tcon, server,
1321 &rqst[0], &oplock, &oparms, utf16_path);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001322 if (rc)
1323 goto sea_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001324 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001325
1326
1327 /* Set Info */
1328 memset(&si_iov, 0, sizeof(si_iov));
1329 rqst[1].rq_iov = si_iov;
1330 rqst[1].rq_nvec = 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001331
Vladimir Zapolskiy64b7f672020-10-10 21:25:54 +03001332 len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001333 ea = kzalloc(len, GFP_KERNEL);
1334 if (ea == NULL) {
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001335 rc = -ENOMEM;
1336 goto sea_exit;
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001337 }
1338
1339 ea->ea_name_length = ea_name_len;
1340 ea->ea_value_length = cpu_to_le16(ea_value_len);
1341 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1342 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1343
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001344 size[0] = len;
1345 data[0] = ea;
1346
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001347 rc = SMB2_set_info_init(tcon, server,
1348 &rqst[1], COMPOUND_FID,
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001349 COMPOUND_FID, current->tgid,
1350 FILE_FULL_EA_INFORMATION,
1351 SMB2_O_INFO_FILE, 0, data, size);
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001352 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001353 smb2_set_related(&rqst[1]);
1354
1355
1356 /* Close */
1357 memset(&close_iov, 0, sizeof(close_iov));
1358 rqst[2].rq_iov = close_iov;
1359 rqst[2].rq_nvec = 1;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001360 rc = SMB2_close_init(tcon, server,
1361 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001362 smb2_set_related(&rqst[2]);
1363
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001364 rc = compound_send_recv(xid, ses, server,
1365 flags, 3, rqst,
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001366 resp_buftype, rsp_iov);
Steve Frenchd2f15422019-09-22 00:55:46 -05001367 /* no need to bump num_remote_opens because handle immediately closed */
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001368
1369 sea_exit:
Paulo Alcantara6aa0c112018-07-04 14:16:16 -03001370 kfree(ea);
Ronnie Sahlberg0967e542018-11-06 22:52:43 +10001371 kfree(utf16_path);
1372 SMB2_open_free(&rqst[0]);
1373 SMB2_set_info_free(&rqst[1]);
1374 SMB2_close_free(&rqst[2]);
1375 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1376 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1377 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001378 return rc;
1379}
Arnd Bergmann1368f152017-09-05 11:24:15 +02001380#endif
Ronnie Sahlberg55175542017-08-24 11:24:56 +10001381
/* Whether this connection is allowed to send SMB2 echo (keepalive) requests */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1387
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001388static void
1389smb2_clear_stats(struct cifs_tcon *tcon)
1390{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001391 int i;
Christoph Probsta205d502019-05-08 21:36:25 +02001392
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001393 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1394 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1395 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1396 }
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001397}
1398
/*
 * Dump the share's capability and sector-size flags plus a few per-tcon
 * fields into /proc (seq_file). Output text and ordering are part of
 * the user-visible format, so keep them stable.
 */
static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	/* sector/alignment flags come from FS_SECTOR_SIZE_INFORMATION */
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}
1431
1432static void
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001433smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1434{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001435 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1436 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
Steve French1995d282018-07-27 15:14:04 -05001437
1438 /*
1439 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1440 * totals (requests sent) since those SMBs are per-session not per tcon
1441 */
Steve French52ce1ac2018-07-31 01:46:47 -05001442 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1443 (long long)(tcon->bytes_read),
1444 (long long)(tcon->bytes_written));
Steve Frenchfae80442018-10-19 17:14:32 -05001445 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1446 atomic_read(&tcon->num_local_opens),
1447 atomic_read(&tcon->num_remote_opens));
Steve French1995d282018-07-27 15:14:04 -05001448 seq_printf(m, "\nTreeConnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001449 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1450 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001451 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001452 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1453 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
Steve French1995d282018-07-27 15:14:04 -05001454 seq_printf(m, "\nCreates: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001455 atomic_read(&sent[SMB2_CREATE_HE]),
1456 atomic_read(&failed[SMB2_CREATE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001457 seq_printf(m, "\nCloses: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001458 atomic_read(&sent[SMB2_CLOSE_HE]),
1459 atomic_read(&failed[SMB2_CLOSE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001460 seq_printf(m, "\nFlushes: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001461 atomic_read(&sent[SMB2_FLUSH_HE]),
1462 atomic_read(&failed[SMB2_FLUSH_HE]));
Steve French1995d282018-07-27 15:14:04 -05001463 seq_printf(m, "\nReads: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001464 atomic_read(&sent[SMB2_READ_HE]),
1465 atomic_read(&failed[SMB2_READ_HE]));
Steve French1995d282018-07-27 15:14:04 -05001466 seq_printf(m, "\nWrites: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001467 atomic_read(&sent[SMB2_WRITE_HE]),
1468 atomic_read(&failed[SMB2_WRITE_HE]));
Steve French1995d282018-07-27 15:14:04 -05001469 seq_printf(m, "\nLocks: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001470 atomic_read(&sent[SMB2_LOCK_HE]),
1471 atomic_read(&failed[SMB2_LOCK_HE]));
Steve French1995d282018-07-27 15:14:04 -05001472 seq_printf(m, "\nIOCTLs: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001473 atomic_read(&sent[SMB2_IOCTL_HE]),
1474 atomic_read(&failed[SMB2_IOCTL_HE]));
Steve French1995d282018-07-27 15:14:04 -05001475 seq_printf(m, "\nQueryDirectories: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001476 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1477 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001478 seq_printf(m, "\nChangeNotifies: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001479 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1480 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
Steve French1995d282018-07-27 15:14:04 -05001481 seq_printf(m, "\nQueryInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001482 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1483 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
Steve French1995d282018-07-27 15:14:04 -05001484 seq_printf(m, "\nSetInfos: %d total %d failed",
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001485 atomic_read(&sent[SMB2_SET_INFO_HE]),
1486 atomic_read(&failed[SMB2_SET_INFO_HE]));
1487 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1488 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1489 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04001490}
1491
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001492static void
1493smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1494{
David Howells2b0143b2015-03-17 22:25:59 +00001495 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04001496 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1497
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001498 cfile->fid.persistent_fid = fid->persistent_fid;
1499 cfile->fid.volatile_fid = fid->volatile_fid;
Aurelien Aptel86f740f2020-02-21 11:19:06 +01001500 cfile->fid.access = fid->access;
Steve Frenchdfe33f92018-10-30 19:50:31 -05001501#ifdef CONFIG_CIFS_DEBUG2
1502 cfile->fid.mid = fid->mid;
1503#endif /* CIFS_DEBUG2 */
Pavel Shilovsky42873b02013-09-05 21:30:16 +04001504 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1505 &fid->purge_cache);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001506 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
Aurelien Aptel94f87372016-09-22 07:38:50 +02001507 memcpy(cfile->fid.create_guid, fid->create_guid, 16);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001508}
1509
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001510static void
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001511smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1512 struct cifs_fid *fid)
1513{
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +04001514 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001515}
1516
/*
 * Close the file's server handle and, when the close response carries
 * post-close attributes, refresh the cached inode timestamps/i_blocks
 * from it (saves a round trip compared to a separate query).  Errors
 * from the close are ignored; the cached inode is simply left as-is.
 */
static void
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
		   cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	/* zero timestamp fields mean "not returned" and are skipped */
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);

	/*
	 * i_blocks is not related to (i_size / i_blksize),
	 * but instead 512 byte (2**9) size is required for
	 * calculating num blocks.
	 */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
}
1555
/*
 * Obtain a server-side copy "resume key" for the source open identified
 * by persistent_fid/volatile_fid via FSCTL_SRV_REQUEST_RESUME_KEY.  On
 * success the key is copied into pcchunk->SourceKey, to be presented
 * later in FSCTL_SRV_COPYCHUNK_WRITE requests.  Returns 0 or a negative
 * errno.
 */
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		   u64 persistent_fid, u64 volatile_fid,
		   struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	/* -EOPNOTSUPP means this share has no server-side copy support */
	if (rc == -EOPNOTSUPP) {
		pr_warn_once("Server share %s does not support copy range\n", tcon->treeName);
		goto req_res_key_exit;
	} else if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	/* validate the response length before touching ResumeKey */
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	/*
	 * NOTE(review): relies on SMB2_ioctl leaving res_key NULL on
	 * failure (kfree(NULL) is a no-op) — the caller's comment states
	 * this as well; confirm against SMB2_ioctl.
	 */
	kfree(res_key);
	return rc;
}
1588
/*
 * Scratch state for the open / query-or-ioctl-or-setinfo / close
 * compound built by smb2_ioctl_query_info().  Bundled into one struct
 * so the function can heap-allocate it in a single kzalloc rather than
 * carrying these (fairly large) iovec arrays individually.
 */
struct iqi_vars {
	struct smb_rqst rqst[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	struct kvec close_iov[1];
};
1598
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001599static int
1600smb2_ioctl_query_info(const unsigned int xid,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001601 struct cifs_tcon *tcon,
Amir Goldstein0f060932020-02-03 21:46:43 +02001602 struct cifs_sb_info *cifs_sb,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001603 __le16 *path, int is_dir,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001604 unsigned long p)
1605{
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001606 struct iqi_vars *vars;
1607 struct smb_rqst *rqst;
1608 struct kvec *rsp_iov;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001609 struct cifs_ses *ses = tcon->ses;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001610 struct TCP_Server_Info *server = cifs_pick_channel(ses);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001611 char __user *arg = (char __user *)p;
1612 struct smb_query_info qi;
1613 struct smb_query_info __user *pqi;
1614 int rc = 0;
Paulo Alcantara04ad69c2021-03-08 12:00:50 -03001615 int flags = CIFS_CP_CREATE_CLOSE_OP;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001616 struct smb2_query_info_rsp *qi_rsp = NULL;
1617 struct smb2_ioctl_rsp *io_rsp = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001618 void *buffer = NULL;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001619 int resp_buftype[3];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001620 struct cifs_open_parms oparms;
1621 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1622 struct cifs_fid fid;
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001623 unsigned int size[2];
1624 void *data[2];
Amir Goldstein0f060932020-02-03 21:46:43 +02001625 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001626
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001627 vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
1628 if (vars == NULL)
1629 return -ENOMEM;
1630 rqst = &vars->rqst[0];
1631 rsp_iov = &vars->rsp_iov[0];
1632
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001633 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001634
1635 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001636 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001637
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001638 if (qi.output_buffer_length > 1024) {
1639 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001640 return -EINVAL;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001641 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001642
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001643 if (!ses || !server) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001644 kfree(vars);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001645 return -EIO;
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001646 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001647
1648 if (smb3_encryption_required(tcon))
1649 flags |= CIFS_TRANSFORM_REQ;
1650
Markus Elfringcfaa1182019-11-05 21:30:25 +01001651 buffer = memdup_user(arg + sizeof(struct smb_query_info),
1652 qi.output_buffer_length);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001653 if (IS_ERR(buffer)) {
1654 kfree(vars);
Markus Elfringcfaa1182019-11-05 21:30:25 +01001655 return PTR_ERR(buffer);
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001656 }
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001657
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001658 /* Open */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001659 rqst[0].rq_iov = &vars->open_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001660 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001661
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001662 memset(&oparms, 0, sizeof(oparms));
1663 oparms.tcon = tcon;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001664 oparms.disposition = FILE_OPEN;
Amir Goldstein0f060932020-02-03 21:46:43 +02001665 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001666 oparms.fid = &fid;
1667 oparms.reconnect = false;
1668
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001669 if (qi.flags & PASSTHRU_FSCTL) {
1670 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1671 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1672 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
Steve French46e66612019-04-11 13:53:17 -05001673 break;
1674 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1675 oparms.desired_access = GENERIC_ALL;
1676 break;
1677 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1678 oparms.desired_access = GENERIC_READ;
1679 break;
1680 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1681 oparms.desired_access = GENERIC_WRITE;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001682 break;
1683 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001684 } else if (qi.flags & PASSTHRU_SET_INFO) {
1685 oparms.desired_access = GENERIC_WRITE;
1686 } else {
1687 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001688 }
1689
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001690 rc = SMB2_open_init(tcon, server,
1691 &rqst[0], &oplock, &oparms, path);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001692 if (rc)
1693 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001694 smb2_set_next_command(tcon, &rqst[0]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001695
1696 /* Query */
Steve French31ba4332019-03-13 02:40:07 -05001697 if (qi.flags & PASSTHRU_FSCTL) {
1698 /* Can eventually relax perm check since server enforces too */
1699 if (!capable(CAP_SYS_ADMIN))
1700 rc = -EPERM;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001701 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001702 rqst[1].rq_iov = &vars->io_iov[0];
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001703 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1704
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001705 rc = SMB2_ioctl_init(tcon, server,
1706 &rqst[1],
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001707 COMPOUND_FID, COMPOUND_FID,
Ronnie Sahlbergefac7792019-04-11 12:20:17 +10001708 qi.info_type, true, buffer,
1709 qi.output_buffer_length,
Ronnie Sahlberg731b82b2020-01-08 13:08:07 +10001710 CIFSMaxBufSize -
1711 MAX_SMB2_CREATE_RESPONSE_SIZE -
1712 MAX_SMB2_CLOSE_RESPONSE_SIZE);
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001713 }
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001714 } else if (qi.flags == PASSTHRU_SET_INFO) {
1715 /* Can eventually relax perm check since server enforces too */
1716 if (!capable(CAP_SYS_ADMIN))
1717 rc = -EPERM;
1718 else {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001719 rqst[1].rq_iov = &vars->si_iov[0];
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001720 rqst[1].rq_nvec = 1;
1721
1722 size[0] = 8;
1723 data[0] = buffer;
1724
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001725 rc = SMB2_set_info_init(tcon, server,
1726 &rqst[1],
Ronnie Sahlberg0e906962019-07-25 13:08:43 +10001727 COMPOUND_FID, COMPOUND_FID,
1728 current->tgid,
1729 FILE_END_OF_FILE_INFORMATION,
1730 SMB2_O_INFO_FILE, 0, data, size);
1731 }
Steve French31ba4332019-03-13 02:40:07 -05001732 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001733 rqst[1].rq_iov = &vars->qi_iov[0];
Steve French31ba4332019-03-13 02:40:07 -05001734 rqst[1].rq_nvec = 1;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001735
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001736 rc = SMB2_query_info_init(tcon, server,
1737 &rqst[1], COMPOUND_FID,
Steve French31ba4332019-03-13 02:40:07 -05001738 COMPOUND_FID, qi.file_info_class,
1739 qi.info_type, qi.additional_information,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001740 qi.input_buffer_length,
1741 qi.output_buffer_length, buffer);
Steve French31ba4332019-03-13 02:40:07 -05001742 } else { /* unknown flags */
Joe Perchesa0a30362020-04-14 22:42:53 -07001743 cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
1744 qi.flags);
Steve French31ba4332019-03-13 02:40:07 -05001745 rc = -EINVAL;
1746 }
1747
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001748 if (rc)
1749 goto iqinf_exit;
Ronnie Sahlberge77fe732018-12-31 13:43:40 +10001750 smb2_set_next_command(tcon, &rqst[1]);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001751 smb2_set_related(&rqst[1]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001752
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001753 /* Close */
Ronnie Sahlbergb2ca6c22020-05-21 15:03:15 +10001754 rqst[2].rq_iov = &vars->close_iov[0];
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001755 rqst[2].rq_nvec = 1;
1756
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001757 rc = SMB2_close_init(tcon, server,
1758 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001759 if (rc)
1760 goto iqinf_exit;
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001761 smb2_set_related(&rqst[2]);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001762
Aurelien Aptel352d96f2020-05-31 12:38:22 -05001763 rc = compound_send_recv(xid, ses, server,
1764 flags, 3, rqst,
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001765 resp_buftype, rsp_iov);
1766 if (rc)
1767 goto iqinf_exit;
Steve Frenchd2f15422019-09-22 00:55:46 -05001768
1769 /* No need to bump num_remote_opens since handle immediately closed */
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001770 if (qi.flags & PASSTHRU_FSCTL) {
1771 pqi = (struct smb_query_info __user *)arg;
1772 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1773 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1774 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001775 if (qi.input_buffer_length > 0 &&
Markus Elfring2b1116b2019-11-05 22:26:53 +01001776 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1777 > rsp_iov[1].iov_len)
1778 goto e_fault;
1779
1780 if (copy_to_user(&pqi->input_buffer_length,
1781 &qi.input_buffer_length,
1782 sizeof(qi.input_buffer_length)))
1783 goto e_fault;
1784
Ronnie Sahlberg5242fcb2019-04-15 12:13:52 +10001785 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1786 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Markus Elfring2b1116b2019-11-05 22:26:53 +01001787 qi.input_buffer_length))
1788 goto e_fault;
Ronnie Sahlbergf5778c32019-03-15 09:07:22 +10001789 } else {
1790 pqi = (struct smb_query_info __user *)arg;
1791 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1792 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1793 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Markus Elfring2b1116b2019-11-05 22:26:53 +01001794 if (copy_to_user(&pqi->input_buffer_length,
1795 &qi.input_buffer_length,
1796 sizeof(qi.input_buffer_length)))
1797 goto e_fault;
1798
1799 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1800 qi.input_buffer_length))
1801 goto e_fault;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001802 }
1803
1804 iqinf_exit:
Aurelien Aptelccd48ec2021-04-09 15:47:01 +02001805 cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
1806 cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
1807 cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
Ronnie Sahlberg8d8b26e2018-10-17 05:47:58 +10001808 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1809 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1810 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
Aurelien Aptelccd48ec2021-04-09 15:47:01 +02001811 kfree(vars);
1812 kfree(buffer);
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001813 return rc;
Markus Elfring2b1116b2019-11-05 22:26:53 +01001814
1815e_fault:
1816 rc = -EFAULT;
1817 goto iqinf_exit;
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05001818}
1819
/*
 * Server-side copy of @len bytes from srcfile (at src_off) to trgtfile
 * (at dest_off) using FSCTL_SRV_COPYCHUNK_WRITE, one chunk per request.
 * On an -EINVAL response the server's advertised limits are adopted
 * (once) and the request retried.  Returns total bytes copied, or a
 * negative errno.
 */
static ssize_t
smb2_copychunk_range(const unsigned int xid,
			struct cifsFileInfo *srcfile,
			struct cifsFileInfo *trgtfile, u64 src_off,
			u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct copychunk_ioctl *pcchunk;
	struct copychunk_ioctl_rsp *retbuf = NULL;
	struct cifs_tcon *tcon;
	int chunks_copied = 0;
	bool chunk_sizes_updated = false;
	ssize_t bytes_written, total_bytes_written = 0;

	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);

	if (pcchunk == NULL)
		return -ENOMEM;

	cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
	/* Request a key from the server to identify the source of the copy */
	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
				  srcfile->fid.persistent_fid,
				  srcfile->fid.volatile_fid, pcchunk);

	/* Note: request_res_key sets res_key null only if rc !=0 */
	if (rc)
		goto cchunk_out;

	/* For now array only one chunk long, will make more flexible later */
	pcchunk->ChunkCount = cpu_to_le32(1);
	pcchunk->Reserved = 0;
	pcchunk->Reserved2 = 0;

	tcon = tlink_tcon(trgtfile->tlink);

	while (len > 0) {
		pcchunk->SourceOffset = cpu_to_le64(src_off);
		pcchunk->TargetOffset = cpu_to_le64(dest_off);
		/* each request copies at most the negotiated chunk size */
		pcchunk->Length =
			cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));

		/* Request server copy to target from src identified by key */
		/* free the previous iteration's response before reuse */
		kfree(retbuf);
		retbuf = NULL;
		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
			true /* is_fsctl */, (char *)pcchunk,
			sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
			(char **)&retbuf, &ret_data_len);
		if (rc == 0) {
			if (ret_data_len !=
					sizeof(struct copychunk_ioctl_rsp)) {
				cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (retbuf->TotalBytesWritten == 0) {
				cifs_dbg(FYI, "no bytes copied\n");
				rc = -EIO;
				goto cchunk_out;
			}
			/*
			 * Check if server claimed to write more than we asked
			 */
			if (le32_to_cpu(retbuf->TotalBytesWritten) >
			    le32_to_cpu(pcchunk->Length)) {
				cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
				rc = -EIO;
				goto cchunk_out;
			}
			if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
				cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
				rc = -EIO;
				goto cchunk_out;
			}
			chunks_copied++;

			/* advance both offsets by what the server wrote */
			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
			src_off += bytes_written;
			dest_off += bytes_written;
			len -= bytes_written;
			total_bytes_written += bytes_written;

			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				bytes_written);
		} else if (rc == -EINVAL) {
			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
				goto cchunk_out;

			cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
				le32_to_cpu(retbuf->ChunksWritten),
				le32_to_cpu(retbuf->ChunkBytesWritten),
				le32_to_cpu(retbuf->TotalBytesWritten));

			/*
			 * Check if this is the first request using these sizes,
			 * (ie check if copy succeed once with original sizes
			 * and check if the server gave us different sizes after
			 * we already updated max sizes on previous request).
			 * if not then why is the server returning an error now
			 */
			if ((chunks_copied != 0) || chunk_sizes_updated)
				goto cchunk_out;

			/* Check that server is not asking us to grow size */
			if (le32_to_cpu(retbuf->ChunkBytesWritten) <
					tcon->max_bytes_chunk)
				tcon->max_bytes_chunk =
					le32_to_cpu(retbuf->ChunkBytesWritten);
			else
				goto cchunk_out; /* server gave us bogus size */

			/* No need to change MaxChunks since already set to 1 */
			chunk_sizes_updated = true;
		} else
			goto cchunk_out;
	}

cchunk_out:
	kfree(pcchunk);
	kfree(retbuf);
	if (rc)
		return rc;
	else
		return total_bytes_written;
}
1950
1951static int
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001952smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1953 struct cifs_fid *fid)
1954{
1955 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1956}
1957
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001958static unsigned int
1959smb2_read_data_offset(char *buf)
1960{
1961 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Christoph Probsta205d502019-05-08 21:36:25 +02001962
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001963 return rsp->DataOffset;
1964}
1965
1966static unsigned int
Long Li74dcf412017-11-22 17:38:46 -07001967smb2_read_data_length(char *buf, bool in_remaining)
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001968{
1969 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
Long Li74dcf412017-11-22 17:38:46 -07001970
1971 if (in_remaining)
1972 return le32_to_cpu(rsp->DataRemaining);
1973
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001974 return le32_to_cpu(rsp->DataLength);
1975}
1976
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001977
/*
 * Copy the cached handle pair from @pfid into the I/O parms and issue a
 * synchronous SMB2 read; result/buffer semantics are SMB2_read()'s.
 */
static int
smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
	       struct cifs_io_parms *parms, unsigned int *bytes_read,
	       char **buf, int *buf_type)
{
	parms->persistent_fid = pfid->persistent_fid;
	parms->volatile_fid = pfid->volatile_fid;
	return SMB2_read(xid, parms, bytes_read, buf, buf_type);
}
1987
/*
 * Copy the cached handle pair from @pfid into the I/O parms and issue a
 * synchronous SMB2 write of @iov; result semantics are SMB2_write()'s.
 */
static int
smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
		struct cifs_io_parms *parms, unsigned int *written,
		struct kvec *iov, unsigned long nr_segs)
{

	parms->persistent_fid = pfid->persistent_fid;
	parms->volatile_fid = pfid->volatile_fid;
	return SMB2_write(xid, parms, written, iov, nr_segs);
}
1998
Steve Frenchd43cc792014-08-13 17:16:29 -05001999/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
2000static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
2001 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
2002{
2003 struct cifsInodeInfo *cifsi;
2004 int rc;
2005
2006 cifsi = CIFS_I(inode);
2007
2008 /* if file already sparse don't bother setting sparse again */
2009 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
2010 return true; /* already sparse */
2011
2012 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
2013 return true; /* already not sparse */
2014
2015 /*
2016 * Can't check for sparse support on share the usual way via the
2017 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
2018 * since Samba server doesn't set the flag on the share, yet
2019 * supports the set sparse FSCTL and returns sparse correctly
2020 * in the file attributes. If we fail setting sparse though we
2021 * mark that server does not support sparse files for this share
2022 * to avoid repeatedly sending the unsupported fsctl to server
2023 * if the file is repeatedly extended.
2024 */
2025 if (tcon->broken_sparse_sup)
2026 return false;
2027
2028 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2029 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002030 true /* is_fctl */,
Steve French153322f2019-03-28 22:32:49 -05002031 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Steve Frenchd43cc792014-08-13 17:16:29 -05002032 if (rc) {
2033 tcon->broken_sparse_sup = true;
2034 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
2035 return false;
2036 }
2037
2038 if (setsparse)
2039 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
2040 else
2041 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
2042
2043 return true;
2044}
2045
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002046static int
2047smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
2048 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
2049{
2050 __le64 eof = cpu_to_le64(size);
Steve French3d1a3742014-08-11 21:05:25 -05002051 struct inode *inode;
2052
2053 /*
2054 * If extending file more than one page make sparse. Many Linux fs
2055 * make files sparse by default when extending via ftruncate
2056 */
David Howells2b0143b2015-03-17 22:25:59 +00002057 inode = d_inode(cfile->dentry);
Steve French3d1a3742014-08-11 21:05:25 -05002058
2059 if (!set_alloc && (size > inode->i_size + 8192)) {
Steve French3d1a3742014-08-11 21:05:25 -05002060 __u8 set_sparse = 1;
Steve French3d1a3742014-08-11 21:05:25 -05002061
Steve Frenchd43cc792014-08-13 17:16:29 -05002062 /* whether set sparse succeeds or not, extend the file */
2063 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
Steve French3d1a3742014-08-11 21:05:25 -05002064 }
2065
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002066 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
Ronnie Sahlberg3764cbd2018-09-03 13:33:47 +10002067 cfile->fid.volatile_fid, cfile->pid, &eof);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002068}
2069
/*
 * Clone (reflink) @len bytes from srcfile at src_off into trgtfile at
 * dest_off using FSCTL_DUPLICATE_EXTENTS_TO_FILE.  The target is grown
 * first if the clone would extend past its current EOF.  Returns 0 or a
 * negative errno (-EOPNOTSUPP when the share lacks block refcounting).
 */
static int
smb2_duplicate_extents(const unsigned int xid,
		struct cifsFileInfo *srcfile,
		struct cifsFileInfo *trgtfile, u64 src_off,
		u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct inode *inode;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* servers may advertise duplicate extent support with this flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);

	/* extend the target first if the clone reaches past its EOF */
	inode = d_inode(trgtfile->dentry);
	if (inode->i_size < dest_off + len) {
		rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
		if (rc)
			goto duplicate_extents_out;

		/*
		 * Although also could set plausible allocation size (i_blocks)
		 * here in addition to setting the file size, in reflink
		 * it is likely that the target file is sparse. Its allocation
		 * size will be queried on next revalidate, but it is important
		 * to make sure that file's cached size is updated immediately
		 */
		cifs_setsize(inode, dest_off + len);
	}
	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			true /* is_fsctl */,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	return rc;
}
Steve French02b16662015-06-27 21:18:36 -07002125
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002126static int
Steve French64a5cfa2013-10-14 15:31:32 -05002127smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2128 struct cifsFileInfo *cfile)
2129{
2130 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
2131 cfile->fid.volatile_fid);
2132}
2133
2134static int
Steve Frenchb3152e22015-06-24 03:17:02 -05002135smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2136 struct cifsFileInfo *cfile)
2137{
2138 struct fsctl_set_integrity_information_req integr_info;
Steve Frenchb3152e22015-06-24 03:17:02 -05002139 unsigned int ret_data_len;
2140
2141 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
2142 integr_info.Flags = 0;
2143 integr_info.Reserved = 0;
2144
2145 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2146 cfile->fid.volatile_fid,
2147 FSCTL_SET_INTEGRITY_INFORMATION,
Aurelien Aptel63a83b82018-01-24 13:46:11 +01002148 true /* is_fsctl */,
Aurelien Aptel51146622017-02-28 15:08:41 +01002149 (char *)&integr_info,
Steve Frenchb3152e22015-06-24 03:17:02 -05002150 sizeof(struct fsctl_set_integrity_information_req),
Steve French153322f2019-03-28 22:32:49 -05002151 CIFSMaxBufSize, NULL,
Steve Frenchb3152e22015-06-24 03:17:02 -05002152 &ret_data_len);
2153
2154}
2155
Steve Frenche02789a2018-08-09 14:33:12 -05002156/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2157#define GMT_TOKEN_SIZE 50
2158
Steve French153322f2019-03-28 22:32:49 -05002159#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2160
/*
 * Input buffer contains (empty) struct smb_snapshot array with size filled in
 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
 *
 * Enumerates the previous versions (snapshots) available for the file via
 * FSCTL_SRV_ENUMERATE_SNAPSHOTS and copies the result back to the user
 * buffer @ioc_buf. Returns 0 on success, negative errno otherwise.
 */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, void __user *ioc_buf)
{
	char *retbuf = NULL;
	unsigned int ret_data_len = 0;
	int rc;
	u32 max_response_size;
	struct smb_snapshot_array snapshot_in;

	/*
	 * On the first query to enumerate the list of snapshots available
	 * for this volume the buffer begins with 0 (number of snapshots
	 * which can be returned is zero since at that point we do not know
	 * how big the buffer needs to be). On the second query,
	 * it (ret_data_len) is set to number of snapshots so we can
	 * know to set the maximum response size larger (see below).
	 */
	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
		return -EFAULT;

	/*
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes.
	 */
	if (ret_data_len == 0)
		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
	else
		max_response_size = CIFSMaxBufSize;

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			true /* is_fsctl */,
			NULL, 0 /* no input data */, max_response_size,
			(char **)&retbuf,
			&ret_data_len);
	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
			rc, ret_data_len);
	if (rc)
		return rc;

	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
		/* Fixup buffer */
		if (copy_from_user(&snapshot_in, ioc_buf,
		    sizeof(struct smb_snapshot_array))) {
			rc = -EFAULT;
			kfree(retbuf);
			return rc;
		}

		/*
		 * Check for min size, ie not large enough to fit even one GMT
		 * token (snapshot). On the first ioctl some users may pass in
		 * smaller size (or zero) to simply get the size of the array
		 * so the user space caller can allocate sufficient memory
		 * and retry the ioctl again with larger array size sufficient
		 * to hold all of the snapshot GMT tokens on the second try.
		 */
		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
			ret_data_len = sizeof(struct smb_snapshot_array);

		/*
		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
		 * the snapshot array (of 50 byte GMT tokens) each
		 * representing an available previous version of the data
		 */
		if (ret_data_len > (snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array)))
			ret_data_len = snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array);

		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
			rc = -EFAULT;
	}

	kfree(retbuf);
	return rc;
}
2247
Steve Frenchd26c2dd2020-02-06 06:00:14 -06002248
2249
/*
 * Implement the SMB3 change-notify ioctl: open the file's path on the
 * master tcon, issue SMB2_change_notify with the user-supplied watch_tree
 * and completion_filter, then close the handle.
 *
 * @xid:     transaction id
 * @pfile:   file the ioctl was issued on (its dentry supplies the path)
 * @ioc_buf: user pointer to a struct smb3_notify with the notify parameters
 *
 * Returns 0 on success or a negative errno. Note that the notify request
 * blocks until a change occurs or the request is cancelled.
 */
static int
smb3_notify(const unsigned int xid, struct file *pfile,
	    void __user *ioc_buf)
{
	struct smb3_notify notify;
	struct dentry *dentry = pfile->f_path.dentry;
	struct inode *inode = file_inode(pfile);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct cifs_tcon *tcon;
	const unsigned char *path;
	void *page = alloc_dentry_path();	/* scratch page for path; freed on exit */
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int rc = 0;

	path = build_path_from_dentry(dentry, page);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		goto notify_exit;
	}

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (utf16_path == NULL) {
		rc = -ENOMEM;
		goto notify_exit;
	}

	/* fetch watch_tree flag and completion filter from userspace */
	if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
		rc = -EFAULT;
		goto notify_exit;
	}

	tcon = cifs_sb_master_tcon(cifs_sb);
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
		       NULL);
	if (rc)
		goto notify_exit;

	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
				notify.watch_tree, notify.completion_filter);

	/* handle is closed regardless of the notify result */
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);

notify_exit:
	free_dentry_path(page);
	kfree(utf16_path);
	return rc;
}
2309
/*
 * Start a directory enumeration: send a compound Create + QueryDirectory
 * request for @path. On success the open handle is returned in @fid and
 * the first batch of entries is parsed into @srch_inf.
 *
 * Returns 0 on success (including the empty-directory case, where
 * srch_inf->endOfSearch is set) or a negative errno.
 */
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *path, struct cifs_sb_info *cifs_sb,
		     struct cifs_fid *fid, __u16 search_flags,
		     struct cifs_search_info *srch_inf)
{
	__le16 *utf16_path;
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	int resp_buftype[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	int rc, flags = 0;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct smb2_query_directory_rsp *qd_rsp = NULL;
	struct smb2_create_rsp *op_rsp = NULL;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qdf_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query directory */
	srch_inf->entries_in_buffer = 0;
	srch_inf->index_of_last_entry = 2;

	memset(&qd_iov, 0, sizeof(qd_iov));
	rqst[1].rq_iov = qd_iov;
	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	/* COMPOUND_FID means "use the handle from the previous Create" */
	rc = SMB2_query_directory_init(xid, tcon, server,
				       &rqst[1],
				       COMPOUND_FID, COMPOUND_FID,
				       0, srch_inf->info_level);
	if (rc)
		goto qdf_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);

	/* If the open failed there is nothing to do */
	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
		goto qdf_free;
	}
	fid->persistent_fid = op_rsp->PersistentFileId;
	fid->volatile_fid = op_rsp->VolatileFileId;

	/* Anything else than ENODATA means a genuine error */
	if (rc && rc != -ENODATA) {
		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
		trace_smb3_query_dir_err(xid, fid->persistent_fid,
					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}

	atomic_inc(&tcon->num_remote_opens);

	/* -ENODATA from the server maps to "no more files" */
	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
	if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
		trace_smb3_query_dir_done(xid, fid->persistent_fid,
					  tcon->tid, tcon->ses->Suid, 0, 0);
		srch_inf->endOfSearch = true;
		rc = 0;
		goto qdf_free;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
					 tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}
	/* srch_inf took ownership of the response buffer; don't free it below */
	resp_buftype[1] = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
				  tcon->ses->Suid, 0, srch_inf->entries_in_buffer);

 qdf_free:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_query_directory_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	return rc;
}
2428
2429static int
2430smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2431 struct cifs_fid *fid, __u16 search_flags,
2432 struct cifs_search_info *srch_inf)
2433{
2434 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2435 fid->volatile_fid, 0, srch_inf);
2436}
2437
2438static int
2439smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2440 struct cifs_fid *fid)
2441{
2442 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2443}
2444
/*
 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
 * the number of credits and return true. Otherwise - return false.
 */
static bool
smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	int scredits, in_flight;

	if (shdr->Status != STATUS_PENDING)
		return false;

	if (shdr->CreditRequest) {
		/* credit bookkeeping is protected by req_lock */
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		scredits = server->credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);
		/* wake senders that may be blocked waiting for credits */
		wake_up(&server->request_q);

		trace_smb3_add_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits,
				le16_to_cpu(shdr->CreditRequest), in_flight);
		cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
				__func__, le16_to_cpu(shdr->CreditRequest), scredits);
	}

	return true;
}
2475
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002476static bool
2477smb2_is_session_expired(char *buf)
2478{
Ronnie Sahlberg49f466b2018-06-01 10:53:06 +10002479 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002480
Mark Symsd81243c2018-05-24 09:47:31 +01002481 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2482 shdr->Status != STATUS_USER_SESSION_DELETED)
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002483 return false;
2484
Steve Frenche68a9322018-07-30 14:23:58 -05002485 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2486 le16_to_cpu(shdr->Command),
2487 le64_to_cpu(shdr->MessageId));
Mark Symsd81243c2018-05-24 09:47:31 +01002488 cifs_dbg(FYI, "Session expired or deleted\n");
Steve Frenche68a9322018-07-30 14:23:58 -05002489
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07002490 return true;
2491}
2492
Rohith Surabattula8e670f72020-09-18 05:37:28 +00002493static bool
2494smb2_is_status_io_timeout(char *buf)
2495{
2496 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
2497
2498 if (shdr->Status == STATUS_IO_TIMEOUT)
2499 return true;
2500 else
2501 return false;
2502}
2503
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002504static void
2505smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
2506{
2507 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
2508 struct list_head *tmp, *tmp1;
2509 struct cifs_ses *ses;
2510 struct cifs_tcon *tcon;
2511
Steve Frenchf1a08652021-02-20 18:52:15 -06002512 if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
2513 return;
2514
2515 spin_lock(&cifs_tcp_ses_lock);
2516 list_for_each(tmp, &server->smb_ses_list) {
2517 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
2518 list_for_each(tmp1, &ses->tcon_list) {
2519 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
2520 if (tcon->tid == shdr->TreeId) {
2521 tcon->need_reconnect = true;
2522 spin_unlock(&cifs_tcp_ses_lock);
2523 pr_warn_once("Server share %s deleted.\n",
2524 tcon->treeName);
2525 return;
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002526 }
2527 }
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002528 }
Steve Frenchf1a08652021-02-20 18:52:15 -06002529 spin_unlock(&cifs_tcp_ses_lock);
Rohith Surabattula9e550b02021-02-16 10:40:45 +00002530}
2531
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002532static int
2533smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2534 struct cifsInodeInfo *cinode)
2535{
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002536 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2537 return SMB2_lease_break(0, tcon, cinode->lease_key,
2538 smb2_get_lease_state(cinode));
2539
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002540 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2541 fid->volatile_fid,
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002542 CIFS_CACHE_READ(cinode) ? 1 : 0);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002543}
2544
Ronnie Sahlbergc5a5f382018-09-03 13:33:41 +10002545void
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002546smb2_set_related(struct smb_rqst *rqst)
2547{
2548 struct smb2_sync_hdr *shdr;
2549
2550 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
Ronnie Sahlberg88a92c92019-07-16 10:41:46 +10002551 if (shdr == NULL) {
2552 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2553 return;
2554 }
Ronnie Sahlberg730928c2018-08-08 15:07:49 +10002555 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2556}
2557
2558char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2559
/*
 * Set the NextCommand offset in @rqst's header so another request can be
 * chained after it in a compound, padding the request to the required
 * 8-byte alignment first.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			memcpy(rqst->rq_iov[0].iov_base +
					rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		/* everything now lives in iov[0]; collapse the vector */
		rqst->rq_nvec = 1;
	}

 finished:
	shdr->NextCommand = cpu_to_le32(len);
}
2616
/*
 * Passes the query info response back to the caller on success.
 * Caller need to free this with free_rsp_buf().
 *
 * Sends a compound Create + QueryInfo + Close for @utf16_path, so the
 * whole query is a single round trip and no handle is left open.
 *
 * @desired_access: access requested on the transient open
 * @class/@type:    SMB2 info class and info type (e.g. SMB2_O_INFO_FILESYSTEM)
 * @output_len:     maximum response payload to request
 * @rsp/@buftype:   on success, the QueryInfo response iov and its buffer type
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Operation 1: open the path */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Operation 2: query info on the handle from the open above */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Operation 3: close the handle */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			/* share was moved/removed on the server */
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->treeName);
		}
		goto qic_exit;
	}
	/* hand the QueryInfo response (and its ownership) to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

 qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2713
/*
 * Fill @buf (kstatfs) with filesystem size information obtained via a
 * compound open/queryinfo/close of the share root using
 * FS_FULL_SIZE_INFORMATION.
 */
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
	     struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
	struct smb2_query_info_rsp *rsp;
	struct smb2_fs_full_size_info *info = NULL;
	__le16 utf16_path = 0; /* Null - open root of share */
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	int rc;


	rc = smb2_query_info_compound(xid, tcon, &utf16_path,
				      FILE_READ_ATTRIBUTES,
				      FS_FULL_SIZE_INFORMATION,
				      SMB2_O_INFO_FILESYSTEM,
				      sizeof(struct smb2_fs_full_size_info),
				      &rsp_iov, &buftype, cifs_sb);
	if (rc)
		goto qfs_exit;

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	buf->f_type = SMB2_MAGIC_NUMBER;
	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	/* validate the offset/length before trusting the payload */
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, buf);

qfs_exit:
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
2750
/*
 * SMB3.1.1 POSIX-extensions variant of statfs: when the tcon negotiated
 * POSIX extensions, query filesystem info via SMB311_posix_qfs_info on a
 * transient open of the share root; otherwise fall back to smb2_queryfs().
 */
static int
smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
	       struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	if (!tcon->posix_extensions)
		return smb2_queryfs(xid, tcon, cifs_sb, buf);

	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
		       NULL, NULL);
	if (rc)
		return rc;

	rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
				   fid.volatile_fid, buf);
	buf->f_type = SMB2_MAGIC_NUMBER;
	/* handle is closed regardless of the query result */
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	return rc;
}
Steve French2d304212018-06-24 23:28:12 -05002782
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07002783static bool
2784smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2785{
2786 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2787 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2788}
2789
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002790static int
2791smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2792 __u64 length, __u32 type, int lock, int unlock, bool wait)
2793{
2794 if (unlock && !lock)
2795 type = SMB2_LOCKFLAG_UNLOCK;
2796 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2797 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2798 current->tgid, length, offset, type, wait);
2799}
2800
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07002801static void
2802smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2803{
2804 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2805}
2806
2807static void
2808smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2809{
2810 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2811}
2812
/* Generate a fresh random lease key for a new open/lease request. */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2818
/*
 * Resolve a DFS referral for @search_name by sending
 * FSCTL_DFS_GET_REFERRALS to the server and parsing the response into
 * an array of dfs_info3_param entries (*target_nodes, caller frees).
 *
 * A tcon is needed to carry the ioctl: the per-session IPC tcon is
 * preferred; failing that, the first tcon on the session's list is
 * borrowed and its refcount bumped for the duration of the call.
 * Returns 0 on success or a negative errno (-ENOTCONN when no tcon is
 * available, -ENOMEM on allocation failure, or the ioctl/parse error).
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		/* borrow the first regular tcon; take a ref under the lock */
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		if (tcon)
			tcon->tc_count++;
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	/* request = fixed header + variable-length UTF-16 path */
	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	/* retry while the session/tcon is being reconnected */
	do {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
	} while (rc == -EAGAIN);

	if (rc) {
		/* -ENOENT/-EOPNOTSUPP are expected on non-DFS shares; stay quiet */
		if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
			cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
Ronnie Sahlberg5de254d2019-06-27 14:57:02 +10002913
2914static int
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002915parse_reparse_posix(struct reparse_posix_data *symlink_buf,
2916 u32 plen, char **target_path,
2917 struct cifs_sb_info *cifs_sb)
2918{
2919 unsigned int len;
2920
2921 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
2922 len = le16_to_cpu(symlink_buf->ReparseDataLength);
2923
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002924 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
2925 cifs_dbg(VFS, "%lld not a supported symlink type\n",
2926 le64_to_cpu(symlink_buf->InodeType));
2927 return -EOPNOTSUPP;
2928 }
2929
2930 *target_path = cifs_strndup_from_utf16(
2931 symlink_buf->PathBuffer,
2932 len, true, cifs_sb->local_nls);
2933 if (!(*target_path))
2934 return -ENOMEM;
2935
2936 convert_delimiter(*target_path, '/');
2937 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2938
2939 return 0;
2940}
2941
/*
 * Parse a Windows symlink reparse point buffer (MS-FSCC 2.1.2.4) and
 * return the substitute-name target in *target_path (caller frees).
 * Returns 0, -EIO on a malformed buffer, or -ENOMEM.
 */
static int
parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
		      u32 plen, char **target_path,
		      struct cifs_sb_info *cifs_sb)
{
	unsigned int sub_len;
	unsigned int sub_offset;

	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */

	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
	/*
	 * 20 is the fixed size of the symlink reparse buffer preceding
	 * PathBuffer per MS-FSCC 2.1.2.4: ReparseTag(4) +
	 * ReparseDataLength(2) + Reserved(2) + SubstituteNameOffset(2) +
	 * SubstituteNameLength(2) + PrintNameOffset(2) +
	 * PrintNameLength(2) + Flags(4).  The substitute name must lie
	 * entirely within the @plen bytes the server returned.
	 */
	if (sub_offset + 20 > plen ||
	    sub_offset + sub_len + 20 > plen) {
		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
		return -EIO;
	}

	*target_path = cifs_strndup_from_utf16(
				symlink_buf->PathBuffer + sub_offset,
				sub_len, true, cifs_sb->local_nls);
	if (!(*target_path))
		return -ENOMEM;

	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	return 0;
}
2971
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002972static int
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002973parse_reparse_point(struct reparse_data_buffer *buf,
2974 u32 plen, char **target_path,
2975 struct cifs_sb_info *cifs_sb)
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002976{
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002977 if (plen < sizeof(struct reparse_data_buffer)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002978 cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
2979 plen);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002980 return -EIO;
2981 }
2982
2983 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2984 sizeof(struct reparse_data_buffer)) {
Joe Perchesa0a30362020-04-14 22:42:53 -07002985 cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
2986 plen);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002987 return -EIO;
2988 }
2989
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002990 /* See MS-FSCC 2.1.2 */
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002991 switch (le32_to_cpu(buf->ReparseTag)) {
2992 case IO_REPARSE_TAG_NFS:
2993 return parse_reparse_posix(
2994 (struct reparse_posix_data *)buf,
Steve Frenchd5ecebc2019-06-28 02:04:18 -05002995 plen, target_path, cifs_sb);
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10002996 case IO_REPARSE_TAG_SYMLINK:
2997 return parse_reparse_symlink(
2998 (struct reparse_symlink_data_buffer *)buf,
2999 plen, target_path, cifs_sb);
3000 default:
Joe Perchesa0a30362020-04-14 22:42:53 -07003001 cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
3002 le32_to_cpu(buf->ReparseTag));
Ronnie Sahlbergf5f111c2019-07-07 07:45:42 +10003003 return -EOPNOTSUPP;
3004 }
Steve Frenchd5ecebc2019-06-28 02:04:18 -05003005}
3006
/*
 * Minimum wire size of a symlink error response: the fixed smb2_err_rsp
 * minus its 1-byte ErrorData placeholder, plus the symlink error payload
 * that replaces it.
 */
#define SMB2_SYMLINK_STRUCT_SIZE \
	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
3009
/*
 * Resolve the target of a symlink (or other reparse point) at
 * @full_path, returning it in *target_path (caller frees).
 *
 * Sends a compound open + FSCTL_GET_REPARSE_POINT ioctl + close.
 * There are two ways the target can be obtained:
 *  - @is_reparse_point and the compound succeeds: the target is parsed
 *    out of the ioctl's reparse buffer (parse_reparse_point);
 *  - otherwise the open is expected to FAIL with a symlink error
 *    response (STATUS_STOPPED_ON_SYMLINK payload), and the target is
 *    extracted from that error's substitute name.
 * Any other outcome returns -ENOENT/-EINVAL/-EIO as appropriate.
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	/* open the reparse point itself, not what it points at */
	int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	rc = SMB2_ioctl_init(tcon, server,
			     &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* remember the open's error response, if any, for the symlink path */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->sync_hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* the claimed output must fit inside the response buffer */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				 plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	/* not a reparse-point query: we need a symlink error from the open */
	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	/* validate the error payload before trusting its offsets/lengths */
	err_buf = err_iov.iov_base;
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* the substitute name is the actual target; convert to local charset */
	*target_path = cifs_strndup_from_utf16(
			(char *)symlink->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

 querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
3198
/*
 * Fetch just the reparse tag of @full_path into *tag via a compound
 * open(OPEN_REPARSE_POINT) + FSCTL_GET_REPARSE_POINT + close.
 * Unlike smb2_query_symlink() the ioctl here targets COMPOUND_FID,
 * since the file id is only known once the compounded open completes
 * on the server.  Returns 0 on success or a negative errno.
 */
int
smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_sb_info *cifs_sb, const char *full_path,
		__u32 *tag)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/*
	 * setup smb2open - TODO add optimization to call cifs_get_readable_path
	 * to see if there is a handle already open that we can use
	 */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;
	/* open the reparse point itself, not its target */
	oparms.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT);
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto query_rp_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	/* COMPOUND_FID: server substitutes the fid from the preceding open */
	rc = SMB2_ioctl_init(tcon, server,
			     &rqst[1], COMPOUND_FID,
			     COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto query_rp_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto query_rp_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);

	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if (rc == 0) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* the claimed output must fit inside the response buffer */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
				 plen);
			rc = -EIO;
			goto query_rp_exit;
		}
		*tag = le32_to_cpu(reparse_buf->ReparseTag);
	}

 query_rp_exit:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
3325
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003326static struct cifs_ntsd *
3327get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003328 const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003329{
3330 struct cifs_ntsd *pntsd = NULL;
3331 unsigned int xid;
3332 int rc = -EOPNOTSUPP;
3333 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3334
3335 if (IS_ERR(tlink))
3336 return ERR_CAST(tlink);
3337
3338 xid = get_xid();
3339 cifs_dbg(FYI, "trying to get acl\n");
3340
3341 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003342 cifsfid->volatile_fid, (void **)&pntsd, pacllen,
3343 info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003344 free_xid(xid);
3345
3346 cifs_put_tlink(tlink);
3347
3348 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3349 if (rc)
3350 return ERR_PTR(rc);
3351 return pntsd;
3352
3353}
3354
/*
 * Fetch the security descriptor for @path by opening it with
 * READ_CONTROL (plus SYSTEM_SECURITY when a SACL is requested),
 * querying the ACL, and closing.  On success returns a kmalloc'd
 * cifs_ntsd (caller frees) with its length in *pacllen; on failure
 * returns ERR_PTR(-errno).
 *
 * NOTE(review): oparms is not zero-initialized here (unlike
 * smb2_query_symlink which memsets it), so fields not assigned below
 * hold stack garbage — presumably SMB2_open only reads the assigned
 * ones, but worth confirming/memsetting.
 */
static struct cifs_ntsd *
get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen, u32 info)
{
	struct cifs_ntsd *pntsd = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	unsigned int xid;
	int rc;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	__le16 *utf16_path;

	cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path) {
		rc = -ENOMEM;
		free_xid(xid);
		return ERR_PTR(rc);
	}

	oparms.tcon = tcon;
	oparms.desired_access = READ_CONTROL;
	oparms.disposition = FILE_OPEN;
	/*
	 * When querying an ACL, even if the file is a symlink we want to open
	 * the source not the target, and so the protocol requires that the
	 * client specify this flag when opening a reparse point
	 */
	oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/* SACL access requires an explicit extra access bit */
	if (info & SACL_SECINFO)
		oparms.desired_access |= SYSTEM_SECURITY;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
		       NULL);
	kfree(utf16_path);
	if (!rc) {
		rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
			    fid.volatile_fid, (void **)&pntsd, pacllen,
			    info);
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	}

	cifs_put_tlink(tlink);
	free_xid(xid);

	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
3416
/*
 * Write a security descriptor to @path.  @aclflag selects which parts
 * are being set (owner/group/SACL/DACL) and determines the access bits
 * requested on the open (WRITE_OWNER / SYSTEM_SECURITY / WRITE_DAC).
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): oparms is not zero-initialized here; fields not
 * assigned below hold stack garbage — presumably SMB2_open only reads
 * the assigned ones, but worth confirming/memsetting.
 */
static int
set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
		struct inode *inode, const char *path, int aclflag)
{
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	unsigned int xid;
	int rc, access_flags = 0;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	__le16 *utf16_path;

	cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	/* map the descriptor parts being set to the required access bits */
	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
		access_flags |= WRITE_OWNER;
	if (aclflag & CIFS_ACL_SACL)
		access_flags |= SYSTEM_SECURITY;
	if (aclflag & CIFS_ACL_DACL)
		access_flags |= WRITE_DAC;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path) {
		rc = -ENOMEM;
		free_xid(xid);
		return rc;
	}

	oparms.tcon = tcon;
	oparms.desired_access = access_flags;
	oparms.create_options = cifs_create_options(cifs_sb, 0);
	oparms.disposition = FILE_OPEN;
	oparms.path = path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
		       NULL, NULL);
	kfree(utf16_path);
	if (!rc) {
		rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
			    fid.volatile_fid, pnntsd, acllen, aclflag);
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	}

	cifs_put_tlink(tlink);
	free_xid(xid);
	return rc;
}
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05003473
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003474/* Retrieve an ACL from the server */
3475static struct cifs_ntsd *
3476get_smb2_acl(struct cifs_sb_info *cifs_sb,
Boris Protopopov3970acf2020-12-18 11:30:12 -06003477 struct inode *inode, const char *path,
3478 u32 *pacllen, u32 info)
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003479{
3480 struct cifs_ntsd *pntsd = NULL;
3481 struct cifsFileInfo *open_file = NULL;
3482
Boris Protopopov9541b812020-12-17 20:58:08 +00003483 if (inode && !(info & SACL_SECINFO))
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003484 open_file = find_readable_file(CIFS_I(inode), true);
Boris Protopopov9541b812020-12-17 20:58:08 +00003485 if (!open_file || (info & SACL_SECINFO))
Boris Protopopov3970acf2020-12-18 11:30:12 -06003486 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003487
Boris Protopopov3970acf2020-12-18 11:30:12 -06003488 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003489 cifsFileInfo_put(open_file);
3490 return pntsd;
3491}
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05003492
/*
 * fallocate(FALLOC_FL_ZERO_RANGE) implementation: zero @len bytes at
 * @offset via FSCTL_SET_ZERO_DATA, then extend EOF if the zeroed range
 * runs past the current size and @keep_size is false.  Extending
 * requires a read oplock/lease (otherwise another client could have
 * changed the size), so without one a non-keep_size request fails with
 * -EOPNOTSUPP.  Returns 0 or a negative errno.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);

	/*
	 * We zero the range through ioctl, so we need remove the page caches
	 * first, otherwise the data may be inconsistent with the server.
	 */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* BeyondFinalZero is exclusive: first byte NOT zeroed */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
3561
/*
 * fallocate(FALLOC_FL_PUNCH_HOLE) implementation: deallocate @len bytes
 * at @offset via FSCTL_SET_ZERO_DATA.  The file must first be marked
 * sparse so the server actually frees the range; if that fails the
 * operation is reported as unsupported.  Returns 0 or a negative errno.
 */
static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len)
{
	struct inode *inode;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__u8 set_sparse = 1;

	xid = get_xid();

	inode = d_inode(cfile->dentry);

	/* Need to make file sparse, if not already, before freeing range. */
	/* Consider adding equivalent for compressed since it could also work */
	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	/*
	 * We implement the punch hole through ioctl, so we need remove the page
	 * caches first, otherwise the data may be inconsistent with the server.
	 */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* BeyondFinalZero is exclusive: first byte NOT deallocated */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
			true /* is_fctl */, (char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			CIFSMaxBufSize, NULL, NULL);
	free_xid(xid);
	return rc;
}
3603
/*
 * smb3_simple_falloc - handle fallocate() modes 0 and FALLOC_FL_KEEP_SIZE.
 *
 * SMB3 has no direct preallocate operation, so this is emulated:
 *  - extending the file is done by setting end-of-file on the server;
 *  - "allocating" an already-sized region is a no-op for non-sparse files
 *    and is approximated for sparse files by clearing the sparse attribute
 *    on the whole file (only when the target range covers most of it).
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP when the request
 * cannot be emulated safely).
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			    loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			/* without read caching we cannot trust i_size below */
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Extending the file
	 */
	if ((keep_size == false) && i_size_read(inode) < off + len) {
		rc = inode_newsize_ok(inode, off + len);
		if (rc)
			goto out;

		/* growing a sparse file would not allocate; clear sparse first */
		if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
			smb2_set_sparse(xid, tcon, cfile, inode, false);

		eof = cpu_to_le64(off + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
		if (rc == 0) {
			/* propagate the new size to the cached inode state */
			cifsi->server_eof = off + len;
			cifs_setsize(inode, off + len);
			cifs_truncate_page(inode->i_mapping, inode->i_size);
			truncate_setsize(inode, off + len);
		}
		goto out;
	}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and since we are not
	 * extending then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		rc = 0;
		goto out;
	}

	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	/* range covers (almost) the whole file: drop sparseness to allocate */
	smb2_set_sparse(xid, tcon, cfile, inode, false);
	rc = 0;

out:
	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3692
Ronnie Sahlberg5476b5d2021-03-27 05:52:29 +10003693static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
3694 loff_t off, loff_t len)
3695{
3696 int rc;
3697 unsigned int xid;
3698 struct cifsFileInfo *cfile = file->private_data;
3699 __le64 eof;
3700
3701 xid = get_xid();
3702
3703 if (off >= i_size_read(file->f_inode) ||
3704 off + len >= i_size_read(file->f_inode)) {
3705 rc = -EINVAL;
3706 goto out;
3707 }
3708
3709 rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
3710 i_size_read(file->f_inode) - off - len, off);
3711 if (rc < 0)
3712 goto out;
3713
3714 eof = cpu_to_le64(i_size_read(file->f_inode) - len);
3715 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3716 cfile->fid.volatile_fid, cfile->pid, &eof);
3717 if (rc < 0)
3718 goto out;
3719
3720 rc = 0;
3721 out:
3722 free_xid(xid);
3723 return rc;
3724}
3725
Ronnie Sahlberg7fe6fe92021-03-27 06:31:30 +10003726static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
3727 loff_t off, loff_t len)
3728{
3729 int rc;
3730 unsigned int xid;
3731 struct cifsFileInfo *cfile = file->private_data;
3732 __le64 eof;
3733 __u64 count;
3734
3735 xid = get_xid();
3736
3737 if (off >= i_size_read(file->f_inode)) {
3738 rc = -EINVAL;
3739 goto out;
3740 }
3741
3742 count = i_size_read(file->f_inode) - off;
3743 eof = cpu_to_le64(i_size_read(file->f_inode) + len);
3744
3745 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3746 cfile->fid.volatile_fid, cfile->pid, &eof);
3747 if (rc < 0)
3748 goto out;
3749
3750 rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
3751 if (rc < 0)
3752 goto out;
3753
3754 rc = smb3_zero_range(file, tcon, off, len, 1);
3755 if (rc < 0)
3756 goto out;
3757
3758 rc = 0;
3759 out:
3760 free_xid(xid);
3761 return rc;
3762}
3763
/*
 * smb3_llseek - lseek() with SEEK_HOLE/SEEK_DATA support.
 *
 * SEEK_SET/SEEK_CUR/SEEK_END are delegated to generic_file_llseek().
 * Hole/data lookup is implemented with FSCTL_QUERY_ALLOCATED_RANGES,
 * asking the server for the first allocated range at or after @offset.
 * Non-sparse files have no holes, so SEEK_HOLE simply returns EOF and
 * SEEK_DATA returns @offset unchanged (rc stays 0, offset untouched).
 *
 * Returns the new file position via vfs_setpos(), or a negative errno
 * (-ENXIO when @offset is past EOF or no data follows it).
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* non-sparse file: everything up to EOF is data, EOF is the hole */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	/*
	 * Ask for a single range descriptor; -E2BIG just means more ranges
	 * exist, the first one is still returned and is all we need here.
	 */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG)
		rc = 0;
	if (rc)
		goto lseek_exit;

	/* no allocated range after offset: offset is already in a hole */
	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	/* no allocated range after offset: no data to seek to */
	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		/* first allocated byte at or after offset */
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: if offset precedes the first range, it is in a hole */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	/* otherwise the hole starts right after that allocated range */
	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
3847
/*
 * smb3_fiemap - report the file's allocated extents via
 * FSCTL_QUERY_ALLOCATED_RANGES.
 *
 * The server is queried in batches of up to 1024 range descriptors;
 * -E2BIG from SMB2_ioctl means the buffer was filled but more ranges
 * remain, so the loop continues from the end of the last one returned.
 * SMB exposes no physical block placement, so each extent is reported
 * with its logical file offset as the "physical" address too.
 *
 * Returns 0 on success or a negative errno.
 */
static int smb3_fiemap(struct cifs_tcon *tcon,
		       struct cifsFileInfo *cfile,
		       struct fiemap_extent_info *fei, u64 start, u64 len)
{
	unsigned int xid;
	struct file_allocated_range_buffer in_data, *out_data;
	u32 out_data_len;
	int i, num, rc, flags, last_blob;
	u64 next;

	/* validate/clamp the requested range against the inode */
	rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
	if (rc)
		return rc;

	xid = get_xid();
 again:
	in_data.file_offset = cpu_to_le64(start);
	in_data.length = cpu_to_le64(len);

	/*
	 * NOTE(review): on failure paths below, kfree(out_data) relies on
	 * SMB2_ioctl initializing *out_data — presumed from its contract;
	 * confirm against SMB2_ioctl.
	 */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			1024 * sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG) {
		/* buffer full: more ranges remain after this batch */
		last_blob = 0;
		rc = 0;
	} else
		last_blob = 1;
	if (rc)
		goto out;

	/* reply must be a whole number of range descriptors */
	if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto out;
	}
	if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto out;
	}

	num = out_data_len / sizeof(struct file_allocated_range_buffer);
	for (i = 0; i < num; i++) {
		flags = 0;
		/* only the very last extent of the last batch is LAST */
		if (i == num - 1 && last_blob)
			flags |= FIEMAP_EXTENT_LAST;

		/* logical == "physical": SMB gives no block addresses */
		rc = fiemap_fill_next_extent(fei,
				le64_to_cpu(out_data[i].file_offset),
				le64_to_cpu(out_data[i].file_offset),
				le64_to_cpu(out_data[i].length),
				flags);
		if (rc < 0)
			goto out;
		if (rc == 1) {
			/* caller's extent array is full: stop cleanly */
			rc = 0;
			goto out;
		}
	}

	if (!last_blob) {
		/* resume the query just past the last range returned */
		next = le64_to_cpu(out_data[num - 1].file_offset) +
		  le64_to_cpu(out_data[num - 1].length);
		len = len - (next - start);
		start = next;
		goto again;
	}

 out:
	free_xid(xid);
	kfree(out_data);
	return rc;
}
Steve French9ccf3212014-10-18 17:01:15 -05003922
Steve French31742c52014-08-17 08:38:47 -05003923static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3924 loff_t off, loff_t len)
3925{
3926 /* KEEP_SIZE already checked for by do_fallocate */
3927 if (mode & FALLOC_FL_PUNCH_HOLE)
3928 return smb3_punch_hole(file, tcon, off, len);
Steve French30175622014-08-17 18:16:40 -05003929 else if (mode & FALLOC_FL_ZERO_RANGE) {
3930 if (mode & FALLOC_FL_KEEP_SIZE)
3931 return smb3_zero_range(file, tcon, off, len, true);
3932 return smb3_zero_range(file, tcon, off, len, false);
Steve French9ccf3212014-10-18 17:01:15 -05003933 } else if (mode == FALLOC_FL_KEEP_SIZE)
3934 return smb3_simple_falloc(file, tcon, off, len, true);
Ronnie Sahlberg5476b5d2021-03-27 05:52:29 +10003935 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3936 return smb3_collapse_range(file, tcon, off, len);
Ronnie Sahlberg7fe6fe92021-03-27 06:31:30 +10003937 else if (mode == FALLOC_FL_INSERT_RANGE)
3938 return smb3_insert_range(file, tcon, off, len);
Steve French9ccf3212014-10-18 17:01:15 -05003939 else if (mode == 0)
3940 return smb3_simple_falloc(file, tcon, off, len, false);
Steve French31742c52014-08-17 08:38:47 -05003941
3942 return -EOPNOTSUPP;
3943}
3944
/*
 * Oplock downgrade for SMB2.0 dialect: there are no lease epochs, so the
 * new level is applied directly; epoch and purge_cache are ignored
 * (passed down as 0/NULL).
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
3952
3953static void
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07003954smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3955 unsigned int epoch, bool *purge_cache);
3956
3957static void
3958smb3_downgrade_oplock(struct TCP_Server_Info *server,
3959 struct cifsInodeInfo *cinode, __u32 oplock,
3960 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003961{
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07003962 unsigned int old_state = cinode->oplock;
3963 unsigned int old_epoch = cinode->epoch;
3964 unsigned int new_state;
3965
3966 if (epoch > old_epoch) {
3967 smb21_set_oplock_level(cinode, oplock, 0, NULL);
3968 cinode->epoch = epoch;
3969 }
3970
3971 new_state = cinode->oplock;
3972 *purge_cache = false;
3973
3974 if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
3975 (new_state & CIFS_CACHE_READ_FLG) == 0)
3976 *purge_cache = true;
3977 else if (old_state == new_state && (epoch - old_epoch > 1))
3978 *purge_cache = true;
Pavel Shilovsky7b9b9ed2019-02-13 15:43:08 -08003979}
3980
3981static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003982smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3983 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003984{
3985 oplock &= 0xFF;
Rohith Surabattula0ab95c22021-05-17 11:28:34 +00003986 cinode->lease_granted = false;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003987 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3988 return;
3989 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003990 cinode->oplock = CIFS_CACHE_RHW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003991 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3992 &cinode->vfs_inode);
3993 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
Pavel Shilovsky42873b02013-09-05 21:30:16 +04003994 cinode->oplock = CIFS_CACHE_RW_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04003995 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3996 &cinode->vfs_inode);
3997 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3998 cinode->oplock = CIFS_CACHE_READ_FLG;
3999 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
4000 &cinode->vfs_inode);
4001 } else
4002 cinode->oplock = 0;
4003}
4004
4005static void
Pavel Shilovsky42873b02013-09-05 21:30:16 +04004006smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4007 unsigned int epoch, bool *purge_cache)
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004008{
4009 char message[5] = {0};
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004010 unsigned int new_oplock = 0;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004011
4012 oplock &= 0xFF;
Rohith Surabattula0ab95c22021-05-17 11:28:34 +00004013 cinode->lease_granted = true;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004014 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4015 return;
4016
Pavel Shilovskya016e272019-09-26 12:31:20 -07004017 /* Check if the server granted an oplock rather than a lease */
4018 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4019 return smb2_set_oplock_level(cinode, oplock, epoch,
4020 purge_cache);
4021
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004022 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004023 new_oplock |= CIFS_CACHE_READ_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004024 strcat(message, "R");
4025 }
4026 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004027 new_oplock |= CIFS_CACHE_HANDLE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004028 strcat(message, "H");
4029 }
4030 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004031 new_oplock |= CIFS_CACHE_WRITE_FLG;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004032 strcat(message, "W");
4033 }
Christoph Probst6a54b2e2019-05-07 17:16:40 +02004034 if (!new_oplock)
4035 strncpy(message, "None", sizeof(message));
4036
4037 cinode->oplock = new_oplock;
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04004038 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
4039 &cinode->vfs_inode);
4040}
4041
/*
 * SMB3 lease state change with epoch tracking. Applies the new state via
 * smb21_set_oplock_level(), then decides whether cached data must be
 * purged: the page cache can only be stale if we previously held (at most)
 * read caching, and the epoch delta shows that state changes happened that
 * we did not observe (delta > 1, or > 0 when the final state also lost
 * caching). purge_cache may be NULL when the caller does not care.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	if (purge_cache) {
		*purge_cache = false;
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			/* was read-only cached: any missed epoch may have
			   invalidated our pages */
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			/* read+handle cached: one epoch step is the expected
			   downgrade; more than one means we missed a state */
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
4076
/*
 * SMB2.0: a level II oplock is the only read-cached (non-write) state.
 */
static bool
smb2_is_read_op(__u32 oplock)
{
	return oplock == SMB2_OPLOCK_LEVEL_II;
}
4082
/*
 * SMB2.1+: the handle is read-only cached when the lease grants read
 * caching but not write caching (handle caching may or may not be set).
 */
static bool
smb21_is_read_op(__u32 oplock)
{
	return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
	       !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
}
4089
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004090static __le32
4091map_oplock_to_lease(u8 oplock)
4092{
4093 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4094 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
4095 else if (oplock == SMB2_OPLOCK_LEVEL_II)
4096 return SMB2_LEASE_READ_CACHING;
4097 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
4098 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
4099 SMB2_LEASE_WRITE_CACHING;
4100 return 0;
4101}
4102
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004103static char *
4104smb2_create_lease_buf(u8 *lease_key, u8 oplock)
4105{
4106 struct create_lease *buf;
4107
4108 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
4109 if (!buf)
4110 return NULL;
4111
Stefano Brivio729c0c92018-07-05 15:10:02 +02004112 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004113 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004114
4115 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4116 (struct create_lease, lcontext));
4117 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
4118 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4119 (struct create_lease, Name));
4120 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07004121 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04004122 buf->Name[0] = 'R';
4123 buf->Name[1] = 'q';
4124 buf->Name[2] = 'L';
4125 buf->Name[3] = 's';
4126 return (char *)buf;
4127}
4128
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004129static char *
4130smb3_create_lease_buf(u8 *lease_key, u8 oplock)
4131{
4132 struct create_lease_v2 *buf;
4133
4134 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
4135 if (!buf)
4136 return NULL;
4137
Stefano Brivio729c0c92018-07-05 15:10:02 +02004138 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004139 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4140
4141 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4142 (struct create_lease_v2, lcontext));
4143 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
4144 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4145 (struct create_lease_v2, Name));
4146 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -07004147 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
Pavel Shilovskyf0473902013-09-04 13:44:05 +04004148 buf->Name[0] = 'R';
4149 buf->Name[1] = 'q';
4150 buf->Name[2] = 'L';
4151 buf->Name[3] = 's';
4152 return (char *)buf;
4153}
4154
/*
 * Parse a v1 lease context from a create response and return the granted
 * lease state. Returns NOCHANGE while a lease break is still in progress.
 * SMB2.0/2.1 leases have no epoch, so *epoch is just zeroed.
 */
static __u8
smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
{
	struct create_lease *lc = (struct create_lease *)buf;

	*epoch = 0; /* not used */
	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
		return SMB2_OPLOCK_LEVEL_NOCHANGE;
	return le32_to_cpu(lc->lcontext.LeaseState);
}
4165
/*
 * Parse a v2 lease context from a create response: extract the epoch,
 * optionally copy back the lease key (when @lease_key is non-NULL), and
 * return the granted lease state. Returns NOCHANGE while a lease break
 * is still in progress.
 */
static __u8
smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
{
	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;

	*epoch = le16_to_cpu(lc->lcontext.Epoch);
	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
		return SMB2_OPLOCK_LEVEL_NOCHANGE;
	if (lease_key)
		memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
	return le32_to_cpu(lc->lcontext.LeaseState);
}
4178
/*
 * Size to use when retrying a failed writepages request: the mount's
 * configured wsize, capped at SMB2_MAX_BUFFER_SIZE.
 */
static unsigned int
smb2_wp_retry_size(struct inode *inode)
{
	return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
		     SMB2_MAX_BUFFER_SIZE);
}
4185
/*
 * A directory handle only needs an SMB2 close on the wire while it is
 * still valid; an invalidated handle has nothing to close on the server.
 */
static bool
smb2_dir_needs_close(struct cifsFileInfo *cfile)
{
	return !cfile->invalidHandle;
}
4191
/*
 * Fill an SMB3 transform header for an encrypted request: original
 * (plaintext) length, the ENCRYPTED flag (0x01), a fresh random nonce
 * sized for the negotiated cipher (GCM vs CCM nonce lengths differ),
 * and the session id copied from the SMB2 header of the request being
 * wrapped.
 */
static void
fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
		   struct smb_rqst *old_rq, __le16 cipher_type)
{
	struct smb2_sync_hdr *shdr =
			(struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;

	memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
	tr_hdr->Flags = cpu_to_le16(0x01); /* Encrypted flag */
	if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
	    (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
	else
		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
	memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
}
4210
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	void *addr;
	/*
	 * VMAP_STACK (at least) puts stack into the vmalloc address space
	 * and virt_to_page() is not valid there, so translate through
	 * vmalloc_to_page() instead.
	 *
	 * NOTE(review): this maps only the page containing buf; it appears
	 * to assume buf+buflen does not cross a page boundary for vmalloc
	 * addresses — confirm with callers.
	 */
	if (is_vmalloc_addr(buf))
		addr = vmalloc_to_page(buf);
	else
		addr = virt_to_page(buf);
	sg_set_page(sg, addr, buflen, offset_in_page(buf));
}
4227
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0]  is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds a scatterlist covering all iovs and pages of the requests plus a
 * final entry for the signature buffer. The first 20 bytes of the
 * transform header (up to and including the signature field) are skipped:
 * they are associated data, not part of the encrypted blob.
 * Returns a kmalloc'd scatterlist (caller frees) or NULL on allocation
 * failure.
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;
	int skip;

	/* one entry per iov and per page, plus one for the signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* signature goes last so the AEAD can read/write it in place */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
4275
/*
 * Look up the SMB3 encryption (@enc != 0) or decryption key for the
 * session identified by @ses_id and copy it into @key
 * (SMB3_ENC_DEC_KEY_SIZE bytes). Returns 0 on success or -EAGAIN when no
 * matching session exists (transient: the session may still be setting up).
 *
 * NOTE(review): the @server parameter is immediately reused as the
 * iterator over cifs_tcp_ses_list, so the session is matched on any
 * connected server, not only the one passed in — presumably intentional
 * for multichannel; confirm.
 */
static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{
	struct cifs_ses *ses;
	u8 *ses_enc_key;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			if (ses->Suid == ses_id) {
				ses_enc_key = enc ? ses->smb3encryptionkey :
					ses->smb3decryptionkey;
				memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
				spin_unlock(&cifs_tcp_ses_lock);
				return 0;
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	return -EAGAIN;
}
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004298/*
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004299 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
4300 * iov[0] - transform header (associate data),
4301 * iov[1-N] - SMB2 header and pages - data to encrypt.
4302 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004303 * untouched.
4304 */
4305static int
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004306crypt_message(struct TCP_Server_Info *server, int num_rqst,
4307 struct smb_rqst *rqst, int enc)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004308{
4309 struct smb2_transform_hdr *tr_hdr =
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004310 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004311 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004312 int rc = 0;
4313 struct scatterlist *sg;
4314 u8 sign[SMB2_SIGNATURE_SIZE] = {};
Shyam Prasad N45a45462021-03-25 12:34:54 +00004315 u8 key[SMB3_ENC_DEC_KEY_SIZE];
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004316 struct aead_request *req;
4317 char *iv;
4318 unsigned int iv_len;
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01004319 DECLARE_CRYPTO_WAIT(wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004320 struct crypto_aead *tfm;
4321 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4322
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08004323 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
4324 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004325 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
Pavel Shilovsky61cfac6f2017-02-28 16:05:19 -08004326 enc ? "en" : "de");
Shyam Prasad N0bd294b2020-10-15 10:41:31 -07004327 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004328 }
4329
4330 rc = smb3_crypto_aead_allocate(server);
4331 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004332 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004333 return rc;
4334 }
4335
4336 tfm = enc ? server->secmech.ccmaesencrypt :
4337 server->secmech.ccmaesdecrypt;
Steve French63ca5652020-10-15 23:41:40 -05004338
Shyam Prasad N45a45462021-03-25 12:34:54 +00004339 if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
4340 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
Steve French63ca5652020-10-15 23:41:40 -05004341 rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
4342 else
Shyam Prasad N45a45462021-03-25 12:34:54 +00004343 rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
Steve French63ca5652020-10-15 23:41:40 -05004344
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004345 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004346 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004347 return rc;
4348 }
4349
4350 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
4351 if (rc) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004352 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004353 return rc;
4354 }
4355
4356 req = aead_request_alloc(tfm, GFP_KERNEL);
4357 if (!req) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004358 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004359 return -ENOMEM;
4360 }
4361
4362 if (!enc) {
4363 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
4364 crypt_len += SMB2_SIGNATURE_SIZE;
4365 }
4366
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004367 sg = init_sg(num_rqst, rqst, sign);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004368 if (!sg) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004369 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02004370 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004371 goto free_req;
4372 }
4373
4374 iv_len = crypto_aead_ivsize(tfm);
4375 iv = kzalloc(iv_len, GFP_KERNEL);
4376 if (!iv) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004377 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
Christophe Jaillet517a6e42017-06-11 09:12:47 +02004378 rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004379 goto free_sg;
4380 }
Steve French2b2f7542019-06-07 15:16:10 -05004381
Steve French63ca5652020-10-15 23:41:40 -05004382 if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4383 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
Steve Frenchfd08f2d2020-10-15 00:25:02 -05004384 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
Steve French2b2f7542019-06-07 15:16:10 -05004385 else {
4386 iv[0] = 3;
Steve Frenchfd08f2d2020-10-15 00:25:02 -05004387 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
Steve French2b2f7542019-06-07 15:16:10 -05004388 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004389
4390 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
4391 aead_request_set_ad(req, assoc_data_len);
4392
4393 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01004394 crypto_req_done, &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004395
Gilad Ben-Yossefa5186b82017-10-18 08:00:46 +01004396 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
4397 : crypto_aead_decrypt(req), &wait);
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004398
4399 if (!rc && enc)
4400 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
4401
4402 kfree(iv);
4403free_sg:
4404 kfree(sg);
4405free_req:
4406 kfree(req);
4407 return rc;
4408}
4409
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004410void
4411smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004412{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004413 int i, j;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004414
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004415 for (i = 0; i < num_rqst; i++) {
4416 if (rqst[i].rq_pages) {
4417 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
4418 put_page(rqst[i].rq_pages[j]);
4419 kfree(rqst[i].rq_pages);
4420 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004421 }
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004422}
4423
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004424/*
4425 * This function will initialize new_rq and encrypt the content.
4426 * The first entry, new_rq[0], only contains a single iov which contains
4427 * a smb2_transform_hdr and is pre-allocated by the caller.
4428 * This function then populates new_rq[1+] with the content from olq_rq[0+].
4429 *
4430 * The end result is an array of smb_rqst structures where the first structure
4431 * only contains a single iov for the transform header which we then can pass
4432 * to crypt_message().
4433 *
4434 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
4435 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
4436 */
4437static int
4438smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
4439 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004440{
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004441 struct page **pages;
4442 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
4443 unsigned int npages;
4444 unsigned int orig_len = 0;
4445 int i, j;
4446 int rc = -ENOMEM;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004447
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004448 for (i = 1; i < num_rqst; i++) {
4449 npages = old_rq[i - 1].rq_npages;
4450 pages = kmalloc_array(npages, sizeof(struct page *),
4451 GFP_KERNEL);
4452 if (!pages)
4453 goto err_free;
4454
4455 new_rq[i].rq_pages = pages;
4456 new_rq[i].rq_npages = npages;
4457 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
4458 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
4459 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
4460 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
4461 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
4462
4463 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
4464
4465 for (j = 0; j < npages; j++) {
4466 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4467 if (!pages[j])
4468 goto err_free;
4469 }
4470
4471 /* copy pages form the old */
4472 for (j = 0; j < npages; j++) {
4473 char *dst, *src;
4474 unsigned int offset, len;
4475
4476 rqst_page_get_length(&new_rq[i], j, &len, &offset);
4477
4478 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
4479 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
4480
4481 memcpy(dst, src, len);
4482 kunmap(new_rq[i].rq_pages[j]);
4483 kunmap(old_rq[i - 1].rq_pages[j]);
4484 }
4485 }
4486
4487 /* fill the 1st iov with a transform header */
Steve French2b2f7542019-06-07 15:16:10 -05004488 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004489
4490 rc = crypt_message(server, num_rqst, new_rq, 1);
Christoph Probsta205d502019-05-08 21:36:25 +02004491 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004492 if (rc)
4493 goto err_free;
4494
4495 return rc;
4496
4497err_free:
4498 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
4499 return rc;
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07004500}
4501
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004502static int
4503smb3_is_transform_hdr(void *buf)
4504{
4505 struct smb2_transform_hdr *trhdr = buf;
4506
4507 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4508}
4509
4510static int
4511decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4512 unsigned int buf_data_size, struct page **pages,
Rohith Surabattula62593012020-10-08 09:58:41 +00004513 unsigned int npages, unsigned int page_data_size,
4514 bool is_offloaded)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004515{
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004516 struct kvec iov[2];
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004517 struct smb_rqst rqst = {NULL};
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004518 int rc;
4519
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004520 iov[0].iov_base = buf;
4521 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4522 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4523 iov[1].iov_len = buf_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004524
4525 rqst.rq_iov = iov;
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004526 rqst.rq_nvec = 2;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004527 rqst.rq_pages = pages;
4528 rqst.rq_npages = npages;
4529 rqst.rq_pagesz = PAGE_SIZE;
4530 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
4531
Ronnie Sahlbergb2c96de2018-08-01 09:26:11 +10004532 rc = crypt_message(server, 1, &rqst, 0);
Christoph Probsta205d502019-05-08 21:36:25 +02004533 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004534
4535 if (rc)
4536 return rc;
4537
Ronnie Sahlbergc713c872018-06-12 08:00:58 +10004538 memmove(buf, iov[1].iov_base, buf_data_size);
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10004539
Rohith Surabattula62593012020-10-08 09:58:41 +00004540 if (!is_offloaded)
4541 server->total_read = buf_data_size + page_data_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004542
4543 return rc;
4544}
4545
4546static int
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004547read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
4548 unsigned int npages, unsigned int len)
4549{
4550 int i;
4551 int length;
4552
4553 for (i = 0; i < npages; i++) {
4554 struct page *page = pages[i];
4555 size_t n;
4556
4557 n = len;
4558 if (len >= PAGE_SIZE) {
4559 /* enough data to fill the page */
4560 n = PAGE_SIZE;
4561 len -= n;
4562 } else {
4563 zero_user(page, len, PAGE_SIZE - len);
4564 len = 0;
4565 }
Long Li1dbe3462018-05-30 12:47:55 -07004566 length = cifs_read_page_from_socket(server, page, 0, n);
Pavel Shilovskyc42a6ab2016-11-17 16:20:23 -08004567 if (length < 0)
4568 return length;
4569 server->total_read += length;
4570 }
4571
4572 return 0;
4573}
4574
4575static int
4576init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
4577 unsigned int cur_off, struct bio_vec **page_vec)
4578{
4579 struct bio_vec *bvec;
4580 int i;
4581
4582 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
4583 if (!bvec)
4584 return -ENOMEM;
4585
4586 for (i = 0; i < npages; i++) {
4587 bvec[i].bv_page = pages[i];
4588 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
4589 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
4590 data_size -= bvec[i].bv_len;
4591 }
4592
4593 if (data_size != 0) {
4594 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4595 kfree(bvec);
4596 return -EIO;
4597 }
4598
4599 *page_vec = bvec;
4600 return 0;
4601}
4602
/*
 * Deliver the payload of a (possibly already decrypted) SMB2 READ response
 * to the cifs_readdata attached to @mid.  The response header is in @buf;
 * the payload is either entirely in @buf or entirely in @pages -- a frame
 * split across both is rejected below.
 *
 * When @is_offloaded, we run on a decryption worker thread and only record
 * the outcome in mid->mid_state; otherwise the mid is dequeued here.
 * Returns bytes copied, 0 with the error stored in rdata->result, or a
 * negative error.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size,
		 bool is_offloaded)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	/* a worker thread must not trigger reconnect itself */
	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		if (!is_offloaded)
			cifs_reconnect(server);
		return -1;
	}

	if (server->ops->is_status_pending &&
			server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_RECEIVED;
		else
			dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* bytes of padding between the response header and the payload */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* hand the payload to the readdata's copy callback */
	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	if (is_offloaded)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		dequeue_mid(mid, false);
	return length;
}
4758
/* Work item for decrypting a large read response off the demultiplex
 * thread; queued on decrypt_wq and processed by smb2_decrypt_offload(). */
struct smb2_decrypt_work {
	struct work_struct decrypt;	/* workqueue linkage */
	struct TCP_Server_Info *server;	/* connection the frame arrived on */
	struct page **ppages;		/* payload pages past the rsp header */
	char *buf;			/* small buffer holding the headers */
	unsigned int npages;		/* number of entries in ppages */
	unsigned int len;		/* total payload bytes in ppages */
};
4767
4768
/*
 * Workqueue routine: decrypt one large read response off the demultiplex
 * thread and complete its mid.  If handle_read_data() fails, the mid is
 * either failed immediately (connection needs reconnect) or re-queued on
 * pending_mid_q for the normal receive path.  Always releases the payload
 * pages, the page array, the header buffer and the work item itself.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len, true);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	/* mark the server alive: we just received a valid frame */
	dw->server->lstrp = jiffies;
	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len,
				      true);
		if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
			mid->when_received = jiffies;
#endif
			if (dw->server->ops->is_network_name_deleted)
				dw->server->ops->is_network_name_deleted(dw->buf,
									 dw->server);

			mid->callback(mid);
		} else {
			/* failure: decide between retry and re-queue */
			spin_lock(&GlobalMid_Lock);
			if (dw->server->tcpStatus == CifsNeedReconnect) {
				mid->mid_state = MID_RETRY_NEEDED;
				spin_unlock(&GlobalMid_Lock);
				mid->callback(mid);
			} else {
				/* put the mid back so the demultiplex
				 * thread can process it again */
				mid->mid_state = MID_REQUEST_SUBMITTED;
				mid->mid_flags &= ~(MID_DELETED);
				list_add_tail(&mid->qhead,
					&dw->server->pending_mid_q);
				spin_unlock(&GlobalMid_Lock);
			}
		}
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4827
4828
/*
 * Receive an encrypted frame large enough to hold a READ response payload.
 * The transform + read response headers stay in the small buffer that
 * already holds the start of the frame; the remaining payload is read from
 * the socket into freshly allocated pages.  Big frames may be handed to
 * the decryption workqueue (returns -1 with *num_mids == 0 -- the worker
 * finds and completes the mid itself); otherwise the frame is decrypted
 * inline and the matching mid returned through @mid.
 */
static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
		       int *num_mids)
{
	char *buf = server->smallbuf;
	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
	unsigned int npages;
	struct page **pages;
	unsigned int len;
	unsigned int buflen = server->pdu_size;
	int rc;
	int i = 0;
	struct smb2_decrypt_work *dw;

	*num_mids = 1;
	/* finish reading the transform + read response headers */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;

	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
	if (rc < 0)
		return rc;
	server->total_read += rc;

	/* everything past the read response header goes into pages */
	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
		server->vals->read_rsp_size;
	npages = DIV_ROUND_UP(len, PAGE_SIZE);

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto discard_data;
	}

	for (; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			rc = -ENOMEM;
			goto discard_data;
		}
	}

	/* read read data into pages */
	rc = read_data_into_pages(server, pages, npages, len);
	if (rc)
		goto free_pages;

	rc = cifs_discard_remaining_data(server);
	if (rc)
		goto free_pages;

	/*
	 * For large reads, offload to different thread for better performance,
	 * use more cores decrypting which can be expensive
	 */

	if ((server->min_offload) && (server->in_flight > 1) &&
	    (server->pdu_size >= server->min_offload)) {
		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
		if (dw == NULL)
			goto non_offloaded_decrypt;

		/* the worker now owns smallbuf; give the server a new one */
		dw->buf = server->smallbuf;
		server->smallbuf = (char *)cifs_small_buf_get();

		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);

		dw->npages = npages;
		dw->server = server;
		dw->ppages = pages;
		dw->len = len;
		queue_work(decrypt_wq, &dw->decrypt);
		*num_mids = 0; /* worker thread takes care of finding mid */
		return -1;
	}

non_offloaded_decrypt:
	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
			      pages, npages, len, false);
	if (rc)
		goto free_pages;

	*mid = smb2_find_mid(server, buf);
	if (*mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		(*mid)->decrypted = true;
		rc = handle_read_data(server, *mid, buf,
				      server->vals->read_rsp_size,
				      pages, npages, len, false);
		if (rc >= 0) {
			if (server->ops->is_network_name_deleted) {
				server->ops->is_network_name_deleted(buf,
								     server);
			}
		}
	}

free_pages:
	/* i is the count of successfully allocated pages */
	for (i = i - 1; i >= 0; i--)
		put_page(pages[i]);
	kfree(pages);
	return rc;
discard_data:
	cifs_discard_remaining_data(server);
	goto free_pages;
}
4936
4937static int
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004938receive_encrypted_standard(struct TCP_Server_Info *server,
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004939 struct mid_q_entry **mids, char **bufs,
4940 int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004941{
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004942 int ret, length;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004943 char *buf = server->smallbuf;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004944 struct smb2_sync_hdr *shdr;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10004945 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004946 unsigned int buf_size;
4947 struct mid_q_entry *mid_entry;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004948 int next_is_large;
4949 char *next_buffer = NULL;
4950
4951 *num_mids = 0;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004952
4953 /* switch to large buffer if too big for a small one */
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004954 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004955 server->large_buf = true;
4956 memcpy(server->bigbuf, buf, server->total_read);
4957 buf = server->bigbuf;
4958 }
4959
4960 /* now read the rest */
4961 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004962 pdu_length - HEADER_SIZE(server) + 1);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004963 if (length < 0)
4964 return length;
4965 server->total_read += length;
4966
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10004967 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Rohith Surabattula62593012020-10-08 09:58:41 +00004968 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004969 if (length)
4970 return length;
4971
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004972 next_is_large = server->large_buf;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004973one_more:
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004974 shdr = (struct smb2_sync_hdr *)buf;
4975 if (shdr->NextCommand) {
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004976 if (next_is_large)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004977 next_buffer = (char *)cifs_buf_get();
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004978 else
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004979 next_buffer = (char *)cifs_small_buf_get();
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004980 memcpy(next_buffer,
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07004981 buf + le32_to_cpu(shdr->NextCommand),
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004982 pdu_length - le32_to_cpu(shdr->NextCommand));
4983 }
4984
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004985 mid_entry = smb2_find_mid(server, buf);
4986 if (mid_entry == NULL)
4987 cifs_dbg(FYI, "mid not found\n");
4988 else {
4989 cifs_dbg(FYI, "mid found\n");
4990 mid_entry->decrypted = true;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004991 mid_entry->resp_buf_size = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08004992 }
4993
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004994 if (*num_mids >= MAX_COMPOUND) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10004995 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10004996 return -1;
4997 }
4998 bufs[*num_mids] = buf;
4999 mids[(*num_mids)++] = mid_entry;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005000
5001 if (mid_entry && mid_entry->handle)
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005002 ret = mid_entry->handle(server, mid_entry);
5003 else
5004 ret = cifs_handle_standard(server, mid_entry);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005005
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005006 if (ret == 0 && shdr->NextCommand) {
5007 pdu_length -= le32_to_cpu(shdr->NextCommand);
5008 server->large_buf = next_is_large;
5009 if (next_is_large)
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005010 server->bigbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005011 else
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005012 server->smallbuf = buf = next_buffer;
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005013 goto one_more;
Pavel Shilovsky3edeb4a2019-07-22 11:38:22 -07005014 } else if (ret != 0) {
5015 /*
5016 * ret != 0 here means that we didn't get to handle_mid() thus
5017 * server->smallbuf and server->bigbuf are still valid. We need
5018 * to free next_buffer because it is not going to be used
5019 * anywhere.
5020 */
5021 if (next_is_large)
5022 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
5023 else
5024 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005025 }
5026
5027 return ret;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005028}
5029
5030static int
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005031smb3_receive_transform(struct TCP_Server_Info *server,
5032 struct mid_q_entry **mids, char **bufs, int *num_mids)
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005033{
5034 char *buf = server->smallbuf;
Ronnie Sahlberg2e964672018-04-09 18:06:26 +10005035 unsigned int pdu_length = server->pdu_size;
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005036 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
5037 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
5038
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005039 if (pdu_length < sizeof(struct smb2_transform_hdr) +
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005040 sizeof(struct smb2_sync_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10005041 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005042 pdu_length);
5043 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005044 return -ECONNABORTED;
5045 }
5046
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005047 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
Ronnie Sahlberg3175eb92019-09-04 12:32:41 +10005048 cifs_server_dbg(VFS, "Transform message is broken\n");
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005049 cifs_reconnect(server);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005050 return -ECONNABORTED;
5051 }
5052
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005053 /* TODO: add support for compounds containing READ. */
Paul Aurich6d2f84e2018-12-31 14:13:34 -08005054 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
Steve French35cf94a2019-09-07 01:09:49 -05005055 return receive_encrypted_read(server, &mids[0], num_mids);
Paul Aurich6d2f84e2018-12-31 14:13:34 -08005056 }
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005057
Ronnie Sahlbergb24df3e2018-08-08 15:07:45 +10005058 return receive_encrypted_standard(server, mids, bufs, num_mids);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005059}
5060
5061int
5062smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
5063{
5064 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
5065
Ronnie Sahlberg1fc6ad22018-06-01 10:53:07 +10005066 return handle_read_data(server, mid, buf, server->pdu_size,
Rohith Surabattulade9ac0a2020-10-28 13:42:21 +00005067 NULL, 0, 0, false);
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005068}
5069
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005070static int
5071smb2_next_header(char *buf)
5072{
5073 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
5074 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
5075
5076 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
5077 return sizeof(struct smb2_transform_hdr) +
5078 le32_to_cpu(t_hdr->OriginalMessageSize);
5079
5080 return le32_to_cpu(hdr->NextCommand);
5081}
5082
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005083static int
5084smb2_make_node(unsigned int xid, struct inode *inode,
5085 struct dentry *dentry, struct cifs_tcon *tcon,
Al Viro55869132021-03-18 01:38:53 -04005086 const char *full_path, umode_t mode, dev_t dev)
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005087{
5088 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
5089 int rc = -EPERM;
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005090 FILE_ALL_INFO *buf = NULL;
Aurelien Aptel7c065142020-06-04 17:23:55 +02005091 struct cifs_io_parms io_parms = {0};
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005092 __u32 oplock = 0;
5093 struct cifs_fid fid;
5094 struct cifs_open_parms oparms;
5095 unsigned int bytes_written;
5096 struct win_dev *pdev;
5097 struct kvec iov[2];
5098
5099 /*
5100 * Check if mounted with mount parm 'sfu' mount parm.
5101 * SFU emulation should work with all servers, but only
5102 * supports block and char device (no socket & fifo),
5103 * and was used by default in earlier versions of Windows
5104 */
5105 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
5106 goto out;
5107
5108 /*
5109 * TODO: Add ability to create instead via reparse point. Windows (e.g.
5110 * their current NFS server) uses this approach to expose special files
5111 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
5112 */
5113
5114 if (!S_ISCHR(mode) && !S_ISBLK(mode))
5115 goto out;
5116
5117 cifs_dbg(FYI, "sfu compat create special file\n");
5118
5119 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
5120 if (buf == NULL) {
5121 rc = -ENOMEM;
5122 goto out;
5123 }
5124
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005125 oparms.tcon = tcon;
5126 oparms.cifs_sb = cifs_sb;
5127 oparms.desired_access = GENERIC_WRITE;
Amir Goldstein0f060932020-02-03 21:46:43 +02005128 oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
5129 CREATE_OPTION_SPECIAL);
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005130 oparms.disposition = FILE_CREATE;
5131 oparms.path = full_path;
5132 oparms.fid = &fid;
5133 oparms.reconnect = false;
5134
5135 if (tcon->ses->server->oplocks)
5136 oplock = REQ_OPLOCK;
5137 else
5138 oplock = 0;
5139 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
5140 if (rc)
5141 goto out;
5142
5143 /*
5144 * BB Do not bother to decode buf since no local inode yet to put
5145 * timestamps in, but we can reuse it safely.
5146 */
5147
5148 pdev = (struct win_dev *)buf;
5149 io_parms.pid = current->tgid;
5150 io_parms.tcon = tcon;
5151 io_parms.offset = 0;
5152 io_parms.length = sizeof(struct win_dev);
5153 iov[1].iov_base = buf;
5154 iov[1].iov_len = sizeof(struct win_dev);
5155 if (S_ISCHR(mode)) {
5156 memcpy(pdev->type, "IntxCHR", 8);
5157 pdev->major = cpu_to_le64(MAJOR(dev));
5158 pdev->minor = cpu_to_le64(MINOR(dev));
5159 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
5160 &bytes_written, iov, 1);
5161 } else if (S_ISBLK(mode)) {
5162 memcpy(pdev->type, "IntxBLK", 8);
5163 pdev->major = cpu_to_le64(MAJOR(dev));
5164 pdev->minor = cpu_to_le64(MINOR(dev));
5165 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
5166 &bytes_written, iov, 1);
5167 }
5168 tcon->ses->server->ops->close(xid, tcon, &fid);
5169 d_drop(dentry);
5170
5171 /* FIXME: add code here to set EAs */
5172out:
5173 kfree(buf);
5174 return rc;
5175}
5176
5177
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005178struct smb_version_operations smb20_operations = {
5179 .compare_fids = smb2_compare_fids,
5180 .setup_request = smb2_setup_request,
5181 .setup_async_request = smb2_setup_async_request,
5182 .check_receive = smb2_check_receive,
5183 .add_credits = smb2_add_credits,
5184 .set_credits = smb2_set_credits,
5185 .get_credits_field = smb2_get_credits_field,
5186 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04005187 .wait_mtu_credits = cifs_wait_mtu_credits,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005188 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005189 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005190 .read_data_offset = smb2_read_data_offset,
5191 .read_data_length = smb2_read_data_length,
5192 .map_error = map_smb2_to_linux_error,
5193 .find_mid = smb2_find_mid,
5194 .check_message = smb2_check_message,
5195 .dump_detail = smb2_dump_detail,
5196 .clear_stats = smb2_clear_stats,
5197 .print_stats = smb2_print_stats,
5198 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005199 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00005200 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005201 .need_neg = smb2_need_neg,
5202 .negotiate = smb2_negotiate,
5203 .negotiate_wsize = smb2_negotiate_wsize,
5204 .negotiate_rsize = smb2_negotiate_rsize,
5205 .sess_setup = SMB2_sess_setup,
5206 .logoff = SMB2_logoff,
5207 .tree_connect = SMB2_tcon,
5208 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05005209 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005210 .is_path_accessible = smb2_is_path_accessible,
5211 .can_echo = smb2_can_echo,
5212 .echo = SMB2_echo,
5213 .query_path_info = smb2_query_path_info,
5214 .get_srv_inum = smb2_get_srv_inum,
5215 .query_file_info = smb2_query_file_info,
5216 .set_path_size = smb2_set_path_size,
5217 .set_file_size = smb2_set_file_size,
5218 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05005219 .set_compression = smb2_set_compression,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005220 .mkdir = smb2_mkdir,
5221 .mkdir_setinfo = smb2_mkdir_setinfo,
5222 .rmdir = smb2_rmdir,
5223 .unlink = smb2_unlink,
5224 .rename = smb2_rename_path,
5225 .create_hardlink = smb2_create_hardlink,
5226 .query_symlink = smb2_query_symlink,
Sachin Prabhu5b23c972016-07-11 16:53:20 +01005227 .query_mf_symlink = smb3_query_mf_symlink,
5228 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005229 .open = smb2_open_file,
5230 .set_fid = smb2_set_fid,
5231 .close = smb2_close_file,
5232 .flush = smb2_flush_file,
5233 .async_readv = smb2_async_readv,
5234 .async_writev = smb2_async_writev,
5235 .sync_read = smb2_sync_read,
5236 .sync_write = smb2_sync_write,
5237 .query_dir_first = smb2_query_dir_first,
5238 .query_dir_next = smb2_query_dir_next,
5239 .close_dir = smb2_close_dir,
5240 .calc_smb_size = smb2_calc_size,
5241 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005242 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005243 .oplock_response = smb2_oplock_response,
5244 .queryfs = smb2_queryfs,
5245 .mand_lock = smb2_mand_lock,
5246 .mand_unlock_range = smb2_unlock_range,
5247 .push_mand_locks = smb2_push_mandatory_locks,
5248 .get_lease_key = smb2_get_lease_key,
5249 .set_lease_key = smb2_set_lease_key,
5250 .new_lease_key = smb2_new_lease_key,
5251 .calc_signature = smb2_calc_signature,
5252 .is_read_op = smb2_is_read_op,
5253 .set_oplock_level = smb2_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005254 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04005255 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005256 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04005257 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04005258 .dir_needs_close = smb2_dir_needs_close,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005259 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305260 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005261#ifdef CONFIG_CIFS_XATTR
5262 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005263 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005264#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05005265 .get_acl = get_smb2_acl,
5266 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05005267 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005268 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005269 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005270 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005271 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005272 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005273 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005274 .is_network_name_deleted = smb2_is_network_name_deleted,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005275};
5276
Steve French1080ef72011-02-24 18:07:19 +00005277struct smb_version_operations smb21_operations = {
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07005278 .compare_fids = smb2_compare_fids,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005279 .setup_request = smb2_setup_request,
Pavel Shilovskyc95b8ee2012-07-11 14:45:28 +04005280 .setup_async_request = smb2_setup_async_request,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005281 .check_receive = smb2_check_receive,
Pavel Shilovsky28ea5292012-05-23 16:18:00 +04005282 .add_credits = smb2_add_credits,
5283 .set_credits = smb2_set_credits,
5284 .get_credits_field = smb2_get_credits_field,
5285 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04005286 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08005287 .adjust_credits = smb2_adjust_credits,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005288 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005289 .revert_current_mid = smb2_revert_current_mid,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07005290 .read_data_offset = smb2_read_data_offset,
5291 .read_data_length = smb2_read_data_length,
5292 .map_error = map_smb2_to_linux_error,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04005293 .find_mid = smb2_find_mid,
5294 .check_message = smb2_check_message,
5295 .dump_detail = smb2_dump_detail,
Pavel Shilovskyd60622e2012-05-28 15:19:39 +04005296 .clear_stats = smb2_clear_stats,
5297 .print_stats = smb2_print_stats,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005298 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005299 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07005300 .downgrade_oplock = smb2_downgrade_oplock,
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04005301 .need_neg = smb2_need_neg,
5302 .negotiate = smb2_negotiate,
Pavel Shilovsky3a3bab52012-09-18 16:20:28 -07005303 .negotiate_wsize = smb2_negotiate_wsize,
5304 .negotiate_rsize = smb2_negotiate_rsize,
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +04005305 .sess_setup = SMB2_sess_setup,
5306 .logoff = SMB2_logoff,
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +04005307 .tree_connect = SMB2_tcon,
5308 .tree_disconnect = SMB2_tdis,
Steve French34f62642013-10-09 02:07:00 -05005309 .qfs_tcon = smb2_qfs_tcon,
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04005310 .is_path_accessible = smb2_is_path_accessible,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04005311 .can_echo = smb2_can_echo,
5312 .echo = SMB2_echo,
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04005313 .query_path_info = smb2_query_path_info,
5314 .get_srv_inum = smb2_get_srv_inum,
Pavel Shilovskyb7546bc2012-09-18 16:20:27 -07005315 .query_file_info = smb2_query_file_info,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07005316 .set_path_size = smb2_set_path_size,
5317 .set_file_size = smb2_set_file_size,
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07005318 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05005319 .set_compression = smb2_set_compression,
Pavel Shilovskya0e73182011-07-19 12:56:37 +04005320 .mkdir = smb2_mkdir,
5321 .mkdir_setinfo = smb2_mkdir_setinfo,
Pavel Shilovsky1a500f02012-07-10 16:14:38 +04005322 .rmdir = smb2_rmdir,
Pavel Shilovskycbe6f432012-09-18 16:20:25 -07005323 .unlink = smb2_unlink,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07005324 .rename = smb2_rename_path,
Pavel Shilovsky568798c2012-09-18 16:20:31 -07005325 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04005326 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05005327 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05005328 .create_mf_symlink = smb3_create_mf_symlink,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07005329 .open = smb2_open_file,
5330 .set_fid = smb2_set_fid,
5331 .close = smb2_close_file,
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07005332 .flush = smb2_flush_file,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07005333 .async_readv = smb2_async_readv,
Pavel Shilovsky33319142012-09-18 16:20:29 -07005334 .async_writev = smb2_async_writev,
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07005335 .sync_read = smb2_sync_read,
Pavel Shilovsky009d3442012-09-18 16:20:30 -07005336 .sync_write = smb2_sync_write,
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07005337 .query_dir_first = smb2_query_dir_first,
5338 .query_dir_next = smb2_query_dir_next,
5339 .close_dir = smb2_close_dir,
5340 .calc_smb_size = smb2_calc_size,
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07005341 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005342 .is_session_expired = smb2_is_session_expired,
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07005343 .oplock_response = smb2_oplock_response,
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07005344 .queryfs = smb2_queryfs,
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07005345 .mand_lock = smb2_mand_lock,
5346 .mand_unlock_range = smb2_unlock_range,
Pavel Shilovskyb1407992012-09-19 06:22:44 -07005347 .push_mand_locks = smb2_push_mandatory_locks,
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07005348 .get_lease_key = smb2_get_lease_key,
5349 .set_lease_key = smb2_set_lease_key,
5350 .new_lease_key = smb2_new_lease_key,
Steve French38107d42012-12-08 22:08:06 -06005351 .calc_signature = smb2_calc_signature,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005352 .is_read_op = smb21_is_read_op,
5353 .set_oplock_level = smb21_set_oplock_level,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005354 .create_lease_buf = smb2_create_lease_buf,
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04005355 .parse_lease_buf = smb2_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005356 .copychunk_range = smb2_copychunk_range,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04005357 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04005358 .dir_needs_close = smb2_dir_needs_close,
Steve French834170c2016-09-30 21:14:26 -05005359 .enum_snapshots = smb3_enum_snapshots,
Steve French2c6251a2020-02-12 22:37:08 -06005360 .notify = smb3_notify,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005361 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305362 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005363#ifdef CONFIG_CIFS_XATTR
5364 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005365 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005366#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05005367 .get_acl = get_smb2_acl,
5368 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05005369 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005370 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005371 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005372 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005373 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005374 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005375 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005376 .is_network_name_deleted = smb2_is_network_name_deleted,
Steve French38107d42012-12-08 22:08:06 -06005377};
5378
Steve French38107d42012-12-08 22:08:06 -06005379struct smb_version_operations smb30_operations = {
5380 .compare_fids = smb2_compare_fids,
5381 .setup_request = smb2_setup_request,
5382 .setup_async_request = smb2_setup_async_request,
5383 .check_receive = smb2_check_receive,
5384 .add_credits = smb2_add_credits,
5385 .set_credits = smb2_set_credits,
5386 .get_credits_field = smb2_get_credits_field,
5387 .get_credits = smb2_get_credits,
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04005388 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08005389 .adjust_credits = smb2_adjust_credits,
Steve French38107d42012-12-08 22:08:06 -06005390 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005391 .revert_current_mid = smb2_revert_current_mid,
Steve French38107d42012-12-08 22:08:06 -06005392 .read_data_offset = smb2_read_data_offset,
5393 .read_data_length = smb2_read_data_length,
5394 .map_error = map_smb2_to_linux_error,
5395 .find_mid = smb2_find_mid,
5396 .check_message = smb2_check_message,
5397 .dump_detail = smb2_dump_detail,
5398 .clear_stats = smb2_clear_stats,
5399 .print_stats = smb2_print_stats,
Steve French769ee6a2013-06-19 14:15:30 -05005400 .dump_share_caps = smb2_dump_share_caps,
Steve French38107d42012-12-08 22:08:06 -06005401 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005402 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07005403 .downgrade_oplock = smb3_downgrade_oplock,
Steve French38107d42012-12-08 22:08:06 -06005404 .need_neg = smb2_need_neg,
5405 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05005406 .negotiate_wsize = smb3_negotiate_wsize,
5407 .negotiate_rsize = smb3_negotiate_rsize,
Steve French38107d42012-12-08 22:08:06 -06005408 .sess_setup = SMB2_sess_setup,
5409 .logoff = SMB2_logoff,
5410 .tree_connect = SMB2_tcon,
5411 .tree_disconnect = SMB2_tdis,
Steven Frenchaf6a12e2013-10-09 20:55:53 -05005412 .qfs_tcon = smb3_qfs_tcon,
Steve French38107d42012-12-08 22:08:06 -06005413 .is_path_accessible = smb2_is_path_accessible,
5414 .can_echo = smb2_can_echo,
5415 .echo = SMB2_echo,
5416 .query_path_info = smb2_query_path_info,
Steve French2e4564b2020-10-22 22:03:14 -05005417 /* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
5418 .query_reparse_tag = smb2_query_reparse_tag,
Steve French38107d42012-12-08 22:08:06 -06005419 .get_srv_inum = smb2_get_srv_inum,
5420 .query_file_info = smb2_query_file_info,
5421 .set_path_size = smb2_set_path_size,
5422 .set_file_size = smb2_set_file_size,
5423 .set_file_info = smb2_set_file_info,
Steve French64a5cfa2013-10-14 15:31:32 -05005424 .set_compression = smb2_set_compression,
Steve French38107d42012-12-08 22:08:06 -06005425 .mkdir = smb2_mkdir,
5426 .mkdir_setinfo = smb2_mkdir_setinfo,
5427 .rmdir = smb2_rmdir,
5428 .unlink = smb2_unlink,
5429 .rename = smb2_rename_path,
5430 .create_hardlink = smb2_create_hardlink,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04005431 .query_symlink = smb2_query_symlink,
Steve Frenchc22870e2014-09-16 07:18:19 -05005432 .query_mf_symlink = smb3_query_mf_symlink,
Steve French5ab97572014-09-15 04:49:28 -05005433 .create_mf_symlink = smb3_create_mf_symlink,
Steve French38107d42012-12-08 22:08:06 -06005434 .open = smb2_open_file,
5435 .set_fid = smb2_set_fid,
5436 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06005437 .close_getattr = smb2_close_getattr,
Steve French38107d42012-12-08 22:08:06 -06005438 .flush = smb2_flush_file,
5439 .async_readv = smb2_async_readv,
5440 .async_writev = smb2_async_writev,
5441 .sync_read = smb2_sync_read,
5442 .sync_write = smb2_sync_write,
5443 .query_dir_first = smb2_query_dir_first,
5444 .query_dir_next = smb2_query_dir_next,
5445 .close_dir = smb2_close_dir,
5446 .calc_smb_size = smb2_calc_size,
5447 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005448 .is_session_expired = smb2_is_session_expired,
Steve French38107d42012-12-08 22:08:06 -06005449 .oplock_response = smb2_oplock_response,
5450 .queryfs = smb2_queryfs,
5451 .mand_lock = smb2_mand_lock,
5452 .mand_unlock_range = smb2_unlock_range,
5453 .push_mand_locks = smb2_push_mandatory_locks,
5454 .get_lease_key = smb2_get_lease_key,
5455 .set_lease_key = smb2_set_lease_key,
5456 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06005457 .generate_signingkey = generate_smb30signingkey,
Steve French38107d42012-12-08 22:08:06 -06005458 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05005459 .set_integrity = smb3_set_integrity,
Pavel Shilovsky53ef1012013-09-05 16:11:28 +04005460 .is_read_op = smb21_is_read_op,
Pavel Shilovsky42873b02013-09-05 21:30:16 +04005461 .set_oplock_level = smb3_set_oplock_level,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04005462 .create_lease_buf = smb3_create_lease_buf,
5463 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005464 .copychunk_range = smb2_copychunk_range,
Steve Frenchca9e7a12015-10-01 21:40:10 -05005465 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchff1c0382013-11-19 23:44:46 -06005466 .validate_negotiate = smb3_validate_negotiate,
Pavel Shilovsky7f6c5002014-06-22 11:03:22 +04005467 .wp_retry_size = smb2_wp_retry_size,
Pavel Shilovsky52755802014-08-18 20:49:57 +04005468 .dir_needs_close = smb2_dir_needs_close,
Steve French31742c52014-08-17 08:38:47 -05005469 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05005470 .enum_snapshots = smb3_enum_snapshots,
Steve Frenchd26c2dd2020-02-06 06:00:14 -06005471 .notify = smb3_notify,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07005472 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005473 .is_transform_hdr = smb3_is_transform_hdr,
5474 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005475 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305476 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005477#ifdef CONFIG_CIFS_XATTR
5478 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005479 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005480#endif /* CIFS_XATTR */
Shirish Pargaonkar2f1afe22017-06-22 22:52:05 -05005481 .get_acl = get_smb2_acl,
5482 .get_acl_by_fid = get_smb2_acl_by_fid,
Shirish Pargaonkar366ed842017-06-28 22:37:32 -05005483 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005484 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005485 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005486 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005487 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005488 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005489 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005490 .is_network_name_deleted = smb2_is_network_name_deleted,
Steve French1080ef72011-02-24 18:07:19 +00005491};
5492
Steve Frenchaab18932015-06-23 23:37:11 -05005493struct smb_version_operations smb311_operations = {
5494 .compare_fids = smb2_compare_fids,
5495 .setup_request = smb2_setup_request,
5496 .setup_async_request = smb2_setup_async_request,
5497 .check_receive = smb2_check_receive,
5498 .add_credits = smb2_add_credits,
5499 .set_credits = smb2_set_credits,
5500 .get_credits_field = smb2_get_credits_field,
5501 .get_credits = smb2_get_credits,
5502 .wait_mtu_credits = smb2_wait_mtu_credits,
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08005503 .adjust_credits = smb2_adjust_credits,
Steve Frenchaab18932015-06-23 23:37:11 -05005504 .get_next_mid = smb2_get_next_mid,
Pavel Shilovskyc781af72019-03-04 14:02:50 -08005505 .revert_current_mid = smb2_revert_current_mid,
Steve Frenchaab18932015-06-23 23:37:11 -05005506 .read_data_offset = smb2_read_data_offset,
5507 .read_data_length = smb2_read_data_length,
5508 .map_error = map_smb2_to_linux_error,
5509 .find_mid = smb2_find_mid,
5510 .check_message = smb2_check_message,
5511 .dump_detail = smb2_dump_detail,
5512 .clear_stats = smb2_clear_stats,
5513 .print_stats = smb2_print_stats,
5514 .dump_share_caps = smb2_dump_share_caps,
5515 .is_oplock_break = smb2_is_valid_oplock_break,
Sachin Prabhu38bd4902017-03-03 15:41:38 -08005516 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07005517 .downgrade_oplock = smb3_downgrade_oplock,
Steve Frenchaab18932015-06-23 23:37:11 -05005518 .need_neg = smb2_need_neg,
5519 .negotiate = smb2_negotiate,
Steve French3d621232018-09-25 15:33:47 -05005520 .negotiate_wsize = smb3_negotiate_wsize,
5521 .negotiate_rsize = smb3_negotiate_rsize,
Steve Frenchaab18932015-06-23 23:37:11 -05005522 .sess_setup = SMB2_sess_setup,
5523 .logoff = SMB2_logoff,
5524 .tree_connect = SMB2_tcon,
5525 .tree_disconnect = SMB2_tdis,
5526 .qfs_tcon = smb3_qfs_tcon,
5527 .is_path_accessible = smb2_is_path_accessible,
5528 .can_echo = smb2_can_echo,
5529 .echo = SMB2_echo,
5530 .query_path_info = smb2_query_path_info,
Steve French2e4564b2020-10-22 22:03:14 -05005531 .query_reparse_tag = smb2_query_reparse_tag,
Steve Frenchaab18932015-06-23 23:37:11 -05005532 .get_srv_inum = smb2_get_srv_inum,
5533 .query_file_info = smb2_query_file_info,
5534 .set_path_size = smb2_set_path_size,
5535 .set_file_size = smb2_set_file_size,
5536 .set_file_info = smb2_set_file_info,
5537 .set_compression = smb2_set_compression,
5538 .mkdir = smb2_mkdir,
5539 .mkdir_setinfo = smb2_mkdir_setinfo,
Steve Frenchbea851b2018-06-14 21:56:32 -05005540 .posix_mkdir = smb311_posix_mkdir,
Steve Frenchaab18932015-06-23 23:37:11 -05005541 .rmdir = smb2_rmdir,
5542 .unlink = smb2_unlink,
5543 .rename = smb2_rename_path,
5544 .create_hardlink = smb2_create_hardlink,
5545 .query_symlink = smb2_query_symlink,
5546 .query_mf_symlink = smb3_query_mf_symlink,
5547 .create_mf_symlink = smb3_create_mf_symlink,
5548 .open = smb2_open_file,
5549 .set_fid = smb2_set_fid,
5550 .close = smb2_close_file,
Steve French43f8a6a2019-12-02 21:46:54 -06005551 .close_getattr = smb2_close_getattr,
Steve Frenchaab18932015-06-23 23:37:11 -05005552 .flush = smb2_flush_file,
5553 .async_readv = smb2_async_readv,
5554 .async_writev = smb2_async_writev,
5555 .sync_read = smb2_sync_read,
5556 .sync_write = smb2_sync_write,
5557 .query_dir_first = smb2_query_dir_first,
5558 .query_dir_next = smb2_query_dir_next,
5559 .close_dir = smb2_close_dir,
5560 .calc_smb_size = smb2_calc_size,
5561 .is_status_pending = smb2_is_status_pending,
Pavel Shilovsky511c54a2017-07-08 14:32:00 -07005562 .is_session_expired = smb2_is_session_expired,
Steve Frenchaab18932015-06-23 23:37:11 -05005563 .oplock_response = smb2_oplock_response,
Steve French2d304212018-06-24 23:28:12 -05005564 .queryfs = smb311_queryfs,
Steve Frenchaab18932015-06-23 23:37:11 -05005565 .mand_lock = smb2_mand_lock,
5566 .mand_unlock_range = smb2_unlock_range,
5567 .push_mand_locks = smb2_push_mandatory_locks,
5568 .get_lease_key = smb2_get_lease_key,
5569 .set_lease_key = smb2_set_lease_key,
5570 .new_lease_key = smb2_new_lease_key,
Steve French373512e2015-12-18 13:05:30 -06005571 .generate_signingkey = generate_smb311signingkey,
Steve Frenchaab18932015-06-23 23:37:11 -05005572 .calc_signature = smb3_calc_signature,
Steve Frenchb3152e22015-06-24 03:17:02 -05005573 .set_integrity = smb3_set_integrity,
Steve Frenchaab18932015-06-23 23:37:11 -05005574 .is_read_op = smb21_is_read_op,
5575 .set_oplock_level = smb3_set_oplock_level,
5576 .create_lease_buf = smb3_create_lease_buf,
5577 .parse_lease_buf = smb3_parse_lease_buf,
Sachin Prabhu312bbc52017-04-04 02:12:04 -05005578 .copychunk_range = smb2_copychunk_range,
Steve French02b16662015-06-27 21:18:36 -07005579 .duplicate_extents = smb2_duplicate_extents,
Steve Frenchaab18932015-06-23 23:37:11 -05005580/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
5581 .wp_retry_size = smb2_wp_retry_size,
5582 .dir_needs_close = smb2_dir_needs_close,
5583 .fallocate = smb3_fallocate,
Steve French834170c2016-09-30 21:14:26 -05005584 .enum_snapshots = smb3_enum_snapshots,
Steve Frenchd26c2dd2020-02-06 06:00:14 -06005585 .notify = smb3_notify,
Pavel Shilovsky026e93d2016-11-03 16:47:37 -07005586 .init_transform_rq = smb3_init_transform_rq,
Pavel Shilovsky4326ed22016-11-17 15:24:46 -08005587 .is_transform_hdr = smb3_is_transform_hdr,
5588 .receive_transform = smb3_receive_transform,
Aurelien Aptel9d496402017-02-13 16:16:49 +01005589 .get_dfs_refer = smb2_get_dfs_refer,
Sachin Prabhuef65aae2017-01-18 15:35:57 +05305590 .select_sectype = smb2_select_sectype,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005591#ifdef CONFIG_CIFS_XATTR
5592 .query_all_EAs = smb2_query_eas,
Ronnie Sahlberg55175542017-08-24 11:24:56 +10005593 .set_EA = smb2_set_ea,
Ronnie Sahlberg95907fe2017-08-24 11:24:55 +10005594#endif /* CIFS_XATTR */
Ronnie Sahlbergc1777df2018-08-10 11:03:55 +10005595 .get_acl = get_smb2_acl,
5596 .get_acl_by_fid = get_smb2_acl_by_fid,
5597 .set_acl = set_smb2_acl,
Ronnie Sahlberg8ce79ec2018-06-01 10:53:08 +10005598 .next_header = smb2_next_header,
Ronnie Sahlbergf5b05d62018-10-07 19:19:58 -05005599 .ioctl_query_info = smb2_ioctl_query_info,
Aurelien Aptelc847dcc2019-03-14 00:29:17 -05005600 .make_node = smb2_make_node,
Ronnie Sahlberg2f3ebab2019-04-25 16:45:29 +10005601 .fiemap = smb3_fiemap,
Ronnie Sahlbergdece44e2019-05-15 07:17:02 +10005602 .llseek = smb3_llseek,
Rohith Surabattula8e670f72020-09-18 05:37:28 +00005603 .is_status_io_timeout = smb2_is_status_io_timeout,
Rohith Surabattula9e550b02021-02-16 10:40:45 +00005604 .is_network_name_deleted = smb2_is_network_name_deleted,
Steve Frenchaab18932015-06-23 23:37:11 -05005605};
Steve Frenchaab18932015-06-23 23:37:11 -05005606
Steve Frenchdd446b12012-11-28 23:21:06 -06005607struct smb_version_values smb20_values = {
5608 .version_string = SMB20_VERSION_STRING,
5609 .protocol_id = SMB20_PROT_ID,
5610 .req_capabilities = 0, /* MBZ */
5611 .large_lock_type = 0,
5612 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5613 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5614 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005615 .header_size = sizeof(struct smb2_sync_hdr),
5616 .header_preamble_size = 0,
Steve Frenchdd446b12012-11-28 23:21:06 -06005617 .max_header_size = MAX_SMB2_HDR_SIZE,
5618 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5619 .lock_cmd = SMB2_LOCK,
5620 .cap_unix = 0,
5621 .cap_nt_find = SMB2_NT_FIND,
5622 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005623 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5624 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005625 .create_lease_size = sizeof(struct create_lease),
Steve Frenchdd446b12012-11-28 23:21:06 -06005626};
5627
Steve French1080ef72011-02-24 18:07:19 +00005628struct smb_version_values smb21_values = {
5629 .version_string = SMB21_VERSION_STRING,
Steve Frenche4aa25e2012-10-01 12:26:22 -05005630 .protocol_id = SMB21_PROT_ID,
5631 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
5632 .large_lock_type = 0,
5633 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5634 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5635 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005636 .header_size = sizeof(struct smb2_sync_hdr),
5637 .header_preamble_size = 0,
Steve Frenche4aa25e2012-10-01 12:26:22 -05005638 .max_header_size = MAX_SMB2_HDR_SIZE,
5639 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5640 .lock_cmd = SMB2_LOCK,
5641 .cap_unix = 0,
5642 .cap_nt_find = SMB2_NT_FIND,
5643 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005644 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5645 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04005646 .create_lease_size = sizeof(struct create_lease),
Steve Frenche4aa25e2012-10-01 12:26:22 -05005647};
5648
Steve French9764c022017-09-17 10:41:35 -05005649struct smb_version_values smb3any_values = {
5650 .version_string = SMB3ANY_VERSION_STRING,
5651 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
Steve Frenchf8015682018-08-31 15:12:10 -05005652 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French9764c022017-09-17 10:41:35 -05005653 .large_lock_type = 0,
5654 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5655 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5656 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005657 .header_size = sizeof(struct smb2_sync_hdr),
5658 .header_preamble_size = 0,
Steve French9764c022017-09-17 10:41:35 -05005659 .max_header_size = MAX_SMB2_HDR_SIZE,
5660 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5661 .lock_cmd = SMB2_LOCK,
5662 .cap_unix = 0,
5663 .cap_nt_find = SMB2_NT_FIND,
5664 .cap_large_files = SMB2_LARGE_FILES,
5665 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5666 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5667 .create_lease_size = sizeof(struct create_lease_v2),
5668};
5669
/*
 * Protocol constants used when no "vers=" option is given: the client
 * negotiates from its default dialect array, so the protocol_id stored
 * here is a placeholder. Field values mirror smb3any_values.
 */
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	/* SMB2+ frames carry no RFC1001-style preamble in this accounting */
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/*
	 * NOTE(review): the -1 presumably discounts the 1-byte placeholder
	 * data buffer at the end of smb2_read_rsp — confirm against the
	 * struct definition in smb2pdu.h.
	 */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	/* No POSIX/Unix extension capabilities on SMB2+ */
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	/* SMB3 dialects use the v2 create lease context */
	.create_lease_size = sizeof(struct create_lease_v2),
};
5690
Steve Frenche4aa25e2012-10-01 12:26:22 -05005691struct smb_version_values smb30_values = {
5692 .version_string = SMB30_VERSION_STRING,
5693 .protocol_id = SMB30_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05005694 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Pavel Shilovsky027e8ee2012-09-19 06:22:43 -07005695 .large_lock_type = 0,
5696 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5697 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5698 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005699 .header_size = sizeof(struct smb2_sync_hdr),
5700 .header_preamble_size = 0,
Pavel Shilovsky093b2bd2011-06-08 15:51:07 +04005701 .max_header_size = MAX_SMB2_HDR_SIZE,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07005702 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
Pavel Shilovsky2dc7e1c2011-12-26 22:53:34 +04005703 .lock_cmd = SMB2_LOCK,
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04005704 .cap_unix = 0,
5705 .cap_nt_find = SMB2_NT_FIND,
5706 .cap_large_files = SMB2_LARGE_FILES,
Jeff Layton502858822013-06-27 12:45:00 -04005707 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5708 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
Pavel Shilovskyf0473902013-09-04 13:44:05 +04005709 .create_lease_size = sizeof(struct create_lease_v2),
Steve French1080ef72011-02-24 18:07:19 +00005710};
Steve French20b6d8b2013-06-12 22:48:41 -05005711
/*
 * Dialect-specific protocol constants for SMB 3.0.2 (vers=3.02).
 * Identical to smb30_values apart from the version string and
 * negotiated protocol id.
 */
struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_sync_hdr),
	/* SMB2+ frames carry no RFC1001-style preamble in this accounting */
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	/*
	 * NOTE(review): the -1 presumably discounts the 1-byte placeholder
	 * data buffer at the end of smb2_read_rsp — confirm against the
	 * struct definition in smb2pdu.h.
	 */
	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
	.lock_cmd = SMB2_LOCK,
	/* No POSIX/Unix extension capabilities on SMB2+ */
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	/* SMB3 dialects use the v2 create lease context */
	.create_lease_size = sizeof(struct create_lease_v2),
};
Steve French5f7fbf72014-12-17 22:52:58 -06005732
Steve French5f7fbf72014-12-17 22:52:58 -06005733struct smb_version_values smb311_values = {
5734 .version_string = SMB311_VERSION_STRING,
5735 .protocol_id = SMB311_PROT_ID,
Steve Frenchf8015682018-08-31 15:12:10 -05005736 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
Steve French5f7fbf72014-12-17 22:52:58 -06005737 .large_lock_type = 0,
5738 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5739 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5740 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
Ronnie Sahlberg977b6172018-06-01 10:53:02 +10005741 .header_size = sizeof(struct smb2_sync_hdr),
5742 .header_preamble_size = 0,
Steve French5f7fbf72014-12-17 22:52:58 -06005743 .max_header_size = MAX_SMB2_HDR_SIZE,
5744 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5745 .lock_cmd = SMB2_LOCK,
5746 .cap_unix = 0,
5747 .cap_nt_find = SMB2_NT_FIND,
5748 .cap_large_files = SMB2_LARGE_FILES,
5749 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5750 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5751 .create_lease_size = sizeof(struct create_lease_v2),
5752};