/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

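/*
 * Default mid callback for synchronous requests: callback_data holds the
 * issuing task, so completing the mid simply wakes it.
 */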
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

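/*
 * Allocate and initialize a mid (multiplex id) entry for a request to the
 * given server. The mid defaults to a synchronous call: its callback wakes
 * the allocating task, and a reference to that task is held for the mid's
 * lifetime. The caller is responsible for queueing it on pending_mid_q.
 */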
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

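/*
 * Final kref release for a mid, called with GlobalMid_Lock held (see
 * cifs_mid_q_entry_release below): hand a cancelled-but-answered mid to
 * handle_cancelled_mid, update per-command round trip statistics when
 * CONFIG_CIFS_STATS2 is enabled, free the response buffer and return the
 * mid to its mempool.
 */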
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

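/*
 * Unlink a mid from pending_mid_q (unless another thread has already
 * unlinked it) and drop the queue's reference.
 */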
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

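/*
 * Compute the on-the-wire length of a request: the iov array plus the page
 * array described by rq_offset and rq_tailsz. For SMB2+ (no preamble) a
 * leading 4-byte vector holds the RFC1002 length and is not counted.
 */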
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

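/*
 * Send one or more requests, wrapping them in an encryption transform when
 * CIFS_TRANSFORM_REQ is set. The transformed chain needs one extra rqst
 * slot for the transform header, hence the MAX_COMPOUND - 1 limit.
 */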
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform "
				"callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

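/*
 * Send a single legacy smb_hdr-based buffer: the first 4 bytes are the
 * RFC1002 length field, sent as their own vector ahead of the SMB payload.
 */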
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

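/*
 * Wait until num_credits credits are available (a negative timeout means
 * wait forever, otherwise it is in milliseconds), then deduct them and
 * record which reconnect instance they belong to. Echo requests fail fast
 * with -EAGAIN rather than wait, and CIFS_NON_BLOCKING callers (oplock
 * break responses) take a credit immediately without queueing.
 */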
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

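/*
 * Reserve credits for every request of a compound chain up front. If the
 * chain needs more credits than are available and too few requests are in
 * flight for more to be granted soon, fail immediately rather than block;
 * otherwise wait up to 60 seconds for the full set.
 */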
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

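/*
 * Allocate a mid for an SMB1 request and queue it on pending_mid_q, after
 * checking that the session state allows the command (only setup and
 * logoff commands may pass while a session is being established or torn
 * down).
 */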
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 *
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

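/*
 * Fold the final state of a mid into an errno once the caller has stopped
 * waiting. A mid with a received response is left alive for the caller to
 * consume; in every other state it is unlinked (if still queued) and
 * released here.
 */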
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

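/*
 * Check a received SMB1 response: verify its signature when signing is in
 * force and map the status code in the header to a POSIX error.
 */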
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

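/*
 * Mid callbacks for compound chains: every link returns the credits the
 * server granted on its response; only the last link wakes the waiting
 * thread, and a cancelled link frees itself instead.
 */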
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -0800990int
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +1000991compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
992 const int flags, const int num_rqst, struct smb_rqst *rqst,
993 int *resp_buf_type, struct kvec *resp_iov)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994{
Ronnie Sahlberg480b1cb2019-03-08 12:58:18 +1000995 int i, j, optype, rc = 0;
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +1000996 struct mid_q_entry *midQ[MAX_COMPOUND];
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -0800997 bool cancelled_mid[MAX_COMPOUND] = {false};
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -0800998 struct cifs_credits credits[MAX_COMPOUND] = {
999 { .value = 0, .instance = 0 }
1000 };
1001 unsigned int instance;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08001002 char *buf;
Aurelien Aptel3190b592019-06-24 13:00:12 -05001003 struct TCP_Server_Info *server;
Steve French50c2f752007-07-13 00:33:32 +00001004
Pavel Shilovskya891f0f2012-05-23 16:14:34 +04001005 optype = flags & CIFS_OP_MASK;
Steve French133672e2007-11-13 22:41:37 +00001006
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001007 for (i = 0; i < num_rqst; i++)
1008 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009
Steve French4b8f9302006-02-26 16:41:18 +00001010 if ((ses == NULL) || (ses->server == NULL)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001011 cifs_dbg(VFS, "Null session\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 return -EIO;
1013 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014
Aurelien Apteld70e9fa2019-09-20 06:31:10 +02001015 if (!ses->binding) {
1016 uint index = 0;
1017
1018 if (ses->chan_count > 1) {
1019 index = (uint)atomic_inc_return(&ses->chan_seq);
1020 index %= ses->chan_count;
1021 }
1022 server = ses->chans[index].server;
1023 } else {
1024 server = cifs_ses_server(ses);
1025 }
1026
Aurelien Aptel3190b592019-06-24 13:00:12 -05001027 if (server->tcpStatus == CifsExiting)
Steve French31ca3bc2005-04-28 22:41:11 -07001028 return -ENOENT;
1029
Pavel Shilovsky792af7b2012-03-23 14:28:02 -04001030 /*
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001031 * Wait for all the requests to become available.
Pavel Shilovsky7091bca2019-01-30 16:58:09 -08001032 * This approach still leaves the possibility to be stuck waiting for
1033 * credits if the server doesn't grant credits to the outstanding
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001034 * requests and if the client is completely idle, not generating any
1035 * other requests.
1036 * This can be handled by the eventual session reconnect.
Pavel Shilovsky792af7b2012-03-23 14:28:02 -04001037 */
Aurelien Aptel3190b592019-06-24 13:00:12 -05001038 rc = wait_for_compound_request(server, num_rqst, flags,
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001039 &instance);
1040 if (rc)
1041 return rc;
1042
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001043 for (i = 0; i < num_rqst; i++) {
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001044 credits[i].value = 1;
1045 credits[i].instance = instance;
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001046 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001047
Pavel Shilovsky792af7b2012-03-23 14:28:02 -04001048 /*
1049 * Make sure that we sign in the same order that we send on this socket
1050 * and avoid races inside tcp sendmsg code that could cause corruption
1051 * of smb data.
1052 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053
Aurelien Aptel3190b592019-06-24 13:00:12 -05001054 mutex_lock(&server->srv_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001056 /*
1057 * All the parts of the compound chain belong obtained credits from the
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001058 * same session. We can not use credits obtained from the previous
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001059 * session to send this request. Check if there were reconnects after
1060 * we obtained credits and return -EAGAIN in such cases to let callers
1061 * handle it.
1062 */
Aurelien Aptel3190b592019-06-24 13:00:12 -05001063 if (instance != server->reconnect_instance) {
1064 mutex_unlock(&server->srv_mutex);
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001065 for (j = 0; j < num_rqst; j++)
Aurelien Aptel3190b592019-06-24 13:00:12 -05001066 add_credits(server, &credits[j], optype);
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001067 return -EAGAIN;
1068 }
1069
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001070 for (i = 0; i < num_rqst; i++) {
Aurelien Aptelf780bd32019-09-20 06:08:34 +02001071 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001072 if (IS_ERR(midQ[i])) {
Aurelien Aptel3190b592019-06-24 13:00:12 -05001073 revert_current_mid(server, i);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001074 for (j = 0; j < i; j++)
1075 cifs_delete_mid(midQ[j]);
Aurelien Aptel3190b592019-06-24 13:00:12 -05001076 mutex_unlock(&server->srv_mutex);
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001077
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001078 /* Update # of requests on wire to server */
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001079 for (j = 0; j < num_rqst; j++)
Aurelien Aptel3190b592019-06-24 13:00:12 -05001080 add_credits(server, &credits[j], optype);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001081 return PTR_ERR(midQ[i]);
1082 }
1083
1084 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
Pavel Shilovsky8a26f0f2019-01-03 16:45:27 -08001085 midQ[i]->optype = optype;
Ronnie Sahlberg4e34feb2018-08-30 10:13:00 +10001086 /*
Pavel Shilovskyee258d72019-01-03 15:53:10 -08001087 * Invoke callback for every part of the compound chain
1088 * to calculate credits properly. Wake up this thread only when
1089 * the last element is received.
Ronnie Sahlberg4e34feb2018-08-30 10:13:00 +10001090 */
1091 if (i < num_rqst - 1)
Pavel Shilovskyee258d72019-01-03 15:53:10 -08001092 midQ[i]->callback = cifs_compound_callback;
1093 else
1094 midQ[i]->callback = cifs_compound_last_callback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 }
Aurelien Aptel3190b592019-06-24 13:00:12 -05001096 cifs_in_send_inc(server);
1097 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1098 cifs_in_send_dec(server);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001099
1100 for (i = 0; i < num_rqst; i++)
1101 cifs_save_when_sent(midQ[i]);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001102
        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason, or it is an oplock break
         * for which we will not receive a response, return the credits.
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we
         * assume that any credits taken from the server structure on the
         * client have been spent and we can't return them. Once we receive
         * responses we will collect credits granted by the server in the
         * mid callbacks and add those credits to the server structure.
         */

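        /*
         * SMB 3.1.1 preauth integrity: during negotiate/session setup,
         * both the request (here) and, further below, the response are
         * folded into a running hash so that a tampered or downgraded
         * negotiation can be detected when the session is established.
         */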
        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

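        /*
         * Wait for each response in turn. Only the callback of the last
         * element in the chain wakes this thread, so if the wait is
         * interrupted we must explicitly cancel every mid that has not
         * completed yet.
         */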
        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                        midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }
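
        /*
         * Mids marked cancelled above stay queued; when their response
         * (or a reconnect) finally arrives, cifs_cancelled_callback
         * releases the credits and frees the mid, which is why the
         * cleanup at "out" must skip them.
         */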

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;
        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

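/*
 * cifs_send_recv - send a single request and wait for its response.
 *
 * Thin wrapper around compound_send_recv() for the common single-request
 * case. A minimal, purely illustrative call (not lifted from any one
 * caller) looks like:
 *
 *	struct kvec resp_iov;
 *	int resp_buf_type = CIFS_NO_BUFFER;
 *
 *	rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, 0, &resp_iov);
 *	...
 *	free_rsp_buf(resp_buf_type, resp_iov.iov_base);
 */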
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

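/*
 * SendReceive2 - build an smb_rqst from caller-supplied iovs and send it.
 *
 * The caller's first iov starts with the 4-byte RFC1001 length field;
 * the transport wants that field in its own kvec, so the vector is
 * re-split before sending (sketch):
 *
 *	caller:	iov[0] = [len(4) | smb header ...], iov[1..n-1] = payload
 *	rqst:	rq_iov[0] = [len(4)], rq_iov[1] = [smb header ...], ...
 */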
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

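/*
 * SendReceive - synchronous send/receive for a single legacy (SMB1)
 * request already marshalled as an smb_hdr, used by the older cifs
 * protocol code rather than the smb_rqst-based paths above. It takes
 * one credit, signs and sends the buffer under srv_mutex, then sleeps
 * until the demultiplex thread has received the response.
 */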
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };
        struct TCP_Server_Info *server;

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        server = ses->server;
        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                                len);
                return -EIO;
        }

        rc = wait_for_free_request(server, flags, &credits.instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                 struct smb_hdr *in_buf,
                 struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                           &bytes_returned, 0);
}

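/*
 * SendReceiveBlockingLock - like SendReceive() but for byte-range lock
 * requests that may block on the server indefinitely. The wait for the
 * response is interruptible: on a signal we send a cancel (an NT_CANCEL
 * for a transact2/POSIX lock, a LOCKINGX_CANCEL_LOCK otherwise), wait
 * for the original request to be answered, and restart the system call
 * (-ERESTARTSYS) if the lock ended up being denied.
 */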
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf, struct smb_hdr *out_buf,
                        int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                              len);
                return -EIO;
        }

        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}